Nov 26 00:07:05 crc systemd[1]: Starting Kubernetes Kubelet... Nov 26 00:07:05 crc restorecon[4695]: Relabeled /var/lib/kubelet/config.json from system_u:object_r:unlabeled_t:s0 to system_u:object_r:container_var_lib_t:s0 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/device-plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/device-plugins/kubelet.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/volumes/kubernetes.io~configmap/nginx-conf/..2025_02_23_05_40_35.4114275528/nginx.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/22e96971 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/21c98286 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8/containers/networking-console-plugin/0f1869e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c15,c25 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/46889d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/5b6a5969 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/setup/6c7921f5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4804f443 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/2a46b283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/a6b5573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c225,c458 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/4f88ee5b not reset as customized 
by admin to system_u:object_r:container_file_t:s0:c225,c458 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/5a4eee4b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c963 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/d1b160f5dda77d281dd8e69ec8d817f9/containers/kube-rbac-proxy-crio/cd87c521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c215,c682 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_33_42.2574241751/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/38602af4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/1483b002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/0346718b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/d3ed4ada not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/3bb473a5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/8cd075a9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/00ab4760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c24 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/containers/router/54a21c09 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c4,c24 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/70478888 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/43802770 not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/955a0edc not reset as customized by admin to system_u:object_r:container_file_t:s0:c176,c499 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/bca2d009 not reset as customized by admin to system_u:object_r:container_file_t:s0:c140,c1009 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/37a5e44f-9a88-4405-be8a-b645485e7312/containers/network-operator/b295f9bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c589,c726 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..2025_02_23_05_21_22.3617465230/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-binary-copy/cnibincopy.sh not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..2025_02_23_05_21_22.2050650026/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/..data not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c574,c582 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes/kubernetes.io~configmap/cni-sysctl-allowlist/allowlist.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/bc46ea27 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5731fc1b not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/egress-router-binary-copy/5e1b2a3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/943f0936 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/3f764ee4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/cni-plugins/8695e3f9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/aed7aa86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/c64d7448 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/bond-cni-plugin/0ba16bd2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/207a939f not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/54aa8cdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/routeoverride-cni/1f5fa595 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/bf9c8153 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/47fba4ea not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c138,c778 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni-bincopy/7ae55ce9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7906a268 not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/ce43fa69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/whereabouts-cni/7fc7ea3a not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/d8c38b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c203,c924 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/9ef015fb not reset as customized by admin to system_u:object_r:container_file_t:s0:c138,c778 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/containers/kube-multus-additional-cni-plugins/b9db6a41 not reset as customized by admin to system_u:object_r:container_file_t:s0:c574,c582 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/b1733d79 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/afccd338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/network-metrics-daemon/9df0a185 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/18938cf8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c476,c820 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/7ab4eb23 not reset as customized by admin to system_u:object_r:container_file_t:s0:c272,c818 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/containers/kube-rbac-proxy/56930be6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c432,c991 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_35.630010865 not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..2025_02_23_05_21_35.1088506337/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes/kubernetes.io~configmap/ovnkube-config/ovnkube.conf not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/0d8e3722 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/d22b2e76 not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/kube-rbac-proxy/e036759f not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/2734c483 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/57878fe7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/3f3c2e58 not reset as customized by admin to system_u:object_r:container_file_t:s0:c89,c211 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/375bec3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c382,c850 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/containers/ovnkube-cluster-manager/7bc41e08 not reset as customized by admin to system_u:object_r:container_file_t:s0:c440,c975 Nov 26 00:07:05 crc restorecon[4695]: 
/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/48c7a72d not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/4b66701f not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/containers/download-server/a5a1c202 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..2025_02_23_05_21_40.3350632666/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-cert-acceptance-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/ovnkube-identity-cm/additional-pod-admission-cond.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..2025_02_23_05_21_40.1388695756 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/volumes/kubernetes.io~configmap/env-overrides/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 
Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/26f3df5b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/6d8fb21d not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/webhook/50e94777 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208473b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/ec9e08ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3b787c39 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/208eaed5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/93aa3a2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/ef543e1b-8068-4ea3-b32a-61027b32e95d/containers/approver/3c697968 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c22 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/ba950ec9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/cb5cdb37 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/3b6479f0-333b-4a96-9adf-2099afdc2447/containers/network-check-target-container/f2df9827 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..2025_02_23_05_22_30.473230615/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 00:07:05 crc restorecon[4695]: 
/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_24_06_22_02.1904938450/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/fedaa673 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/9ca2df95 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/machine-config-operator/b2d7460e not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2207853c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/241c1c29 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/containers/kube-rbac-proxy/2d910eaf not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..data not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c84,c419 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/..2025_02_23_05_23_49.3726007728/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/..2025_02_23_05_23_49.841175008/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/etcd-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178 not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.843437178/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 26 00:07:05 crc restorecon[4695]: 
/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/c6c0f2e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/399edc97 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8049f7cc not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/0cec5484 not reset as customized by admin to system_u:object_r:container_file_t:s0:c263,c871 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/312446d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c406,c828 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/containers/etcd-operator/8e56a35d not reset as customized by admin to system_u:object_r:container_file_t:s0:c84,c419 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589 not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.133159589/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c108,c511 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/2d30ddb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/eca8053d not reset as customized by admin to system_u:object_r:container_file_t:s0:c380,c909 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/c3a25c9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c168,c522 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/containers/kube-controller-manager-operator/b9609c22 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c108,c511 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/e8b0eca9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/b36a9c3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/dns-operator/38af7b07 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/ae821620 not reset as customized by admin to system_u:object_r:container_file_t:s0:c106,c418 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/baa23338 not reset as customized by admin to system_u:object_r:container_file_t:s0:c529,c711 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/containers/kube-rbac-proxy/2c534809 not reset as customized by admin to system_u:object_r:container_file_t:s0:c968,c969 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3532625537/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/59b29eae not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/c91a8e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c381 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/4d87494a not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c442,c857 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/containers/kube-scheduler-operator-container/1e33ca63 not reset as customized by admin to system_u:object_r:container_file_t:s0:c661,c999 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/8dea7be2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d0b04a99 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/kube-rbac-proxy/d84f01e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/4109059b not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/a7258a3e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/containers/package-server-manager/05bdf2b6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/f3261b51 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/315d045e not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/5fdcf278 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/d053f757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/containers/control-plane-machine-set-operator/c2850dc7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:05 crc restorecon[4695]: 
/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..2025_02_23_05_22_30.2390596521/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes/kubernetes.io~configmap/marketplace-trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fcfb0b2b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c7ac9b7d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/fa0c0d52 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/c609b6ba not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/2be6c296 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/89a32653 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/4eb9afeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/containers/marketplace-operator/13af6efa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/b03f9724 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/e3d105cc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c12,c18 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/containers/olm-operator/3aed4d83 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1906041176/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/0765fa6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/2cefc627 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/3dcc6345 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/containers/kube-storage-version-migrator-operator/365af391 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c18 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 26 00:07:05 crc restorecon[4695]: 
/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-SelfManagedHA-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-TechPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-DevPreviewNoUpgrade.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes/kubernetes.io~empty-dir/available-featuregates/featureGate-Hypershift-Default.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b1130c0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/236a5913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-api/b9432e26 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/5ddb0e3f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/986dc4fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/8a23ff9a not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/9728ae68 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/containers/openshift-config-operator/665f31d0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c12 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 26 00:07:05 crc restorecon[4695]: 
/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1255385357/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/..2025_02_23_05_23_57.573792656/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/service-ca-bundle/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_22_30.3254245399/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes/kubernetes.io~configmap/trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/136c9b42 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c0,c16 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/98a1575b not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/cac69136 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/5deb77a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/containers/authentication-operator/2ae53400 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c16 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3608339744/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes/kubernetes.io~configmap/config/operator-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/e46f2326 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/dc688d3c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/3497c3cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/containers/service-ca-operator/177eb008 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c16 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 26 00:07:05 crc restorecon[4695]: 
/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.3819292994/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/af5a2afa not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/d780cb1f not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/49b0f374 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/containers/openshift-apiserver-operator/26fbb125 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c13 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.3244779536/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 00:07:05 crc restorecon[4695]: 
/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/cf14125a not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/b7f86972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/e51d739c not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/88ba6a69 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/669a9acf not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/5cd51231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/75349ec7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/15c26839 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/45023dcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/ingress-operator/2bb66a50 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/64d03bdd not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/ab8e7ca0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/containers/kube-rbac-proxy/bb9be25f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c11 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_22_30.2034221258/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:05 crc restorecon[4695]: 
/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/9a0b61d3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/d471b9d2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/containers/cluster-image-registry-operator/8cb76b8e not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/11a00840 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/ec355a92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/containers/catalog-operator/992f735e not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..2025_02_23_05_22_30.1782968797/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 00:07:05 crc restorecon[4695]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 00:07:05 crc 
restorecon[4695]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d59cdbbc not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/72133ff0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/c56c834c not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/d13724c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/containers/openshift-controller-manager-operator/0a498258 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c14 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa471982 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fc900d92 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/containers/machine-config-server/fa7d68da not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/4bacf9b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/424021b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/migrator/fc2e31a3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/f51eefac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/c8997f2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/containers/graceful-termination/7481f599 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c22 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..2025_02_23_05_22_49.2255460704/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes/kubernetes.io~configmap/signing-cabundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/fdafea19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/d0e1c571 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/ee398915 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/containers/service-ca-controller/682bb6b8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c22 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a3e67855 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/a989f289 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/setup/915431bd not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/7796fdab not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/dcdb5f19 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-ensure-env-vars/a3aaa88c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/5508e3e6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/160585de not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-resources-copy/e99f8da3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/8bc85570 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/a5861c91 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcdctl/84db1135 not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/9e1a6043 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/c1aba1c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd/d55ccd6d not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/971cc9f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/8f2e3dcf not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-metrics/ceb35e9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/1c192745 not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/5209e501 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-readyz/f83de4df not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/e7b978ac not reset as customized by admin to system_u:object_r:container_file_t:s0:c294,c884 Nov 26 00:07:06 crc 
restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/c64304a1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c1016 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/2139d3e2895fc6797b9c76a1b4c9886d/containers/etcd-rev/5384386b not reset as customized by admin to system_u:object_r:container_file_t:s0:c666,c920 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/cce3e3ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/multus-admission-controller/8fb75465 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/740f573e not reset as customized by admin to system_u:object_r:container_file_t:s0:c435,c756 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/containers/kube-rbac-proxy/32fd1134 not reset as customized by admin to system_u:object_r:container_file_t:s0:c268,c620 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/0a861bd3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/80363026 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/containers/serve-healthcheck-canary/bfa952a8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c19,c24 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..2025_02_23_05_33_31.2122464563/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/auth-proxy-config/config-file.yaml not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c129,c158 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..2025_02_23_05_33_31.333075221 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/793bf43d not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/7db1bb6e not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/kube-rbac-proxy/4f6a0368 not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/c12c7d86 not reset as customized by admin to system_u:object_r:container_file_t:s0:c381,c387 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/36c4a773 not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/4c1e98ae not reset as customized by admin to system_u:object_r:container_file_t:s0:c142,c438 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/containers/machine-approver-controller/a4c8115c not reset as customized by admin to system_u:object_r:container_file_t:s0:c129,c158 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/setup/7db1802e not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver/a008a7ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-syncer/2c836bac not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-cert-regeneration-controller/0ce62299 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c97,c980 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-insecure-readyz/945d2457 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/containers/kube-apiserver-check-endpoints/7d5c1dd8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c97,c980 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/advanced-cluster-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-broker-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq-streams-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amq7-interconnect-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-automation-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ansible-cloud-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry-3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bamoe-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/index.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/businessautomation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cephcsi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cincinnati-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-kube-descheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/compliance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/container-security-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/costmanagement-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cryostat-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datagrid/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devspaces/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devworkspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dpu-network-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eap/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/file-integrity-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-console/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fuse-online/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gatekeeper-operator-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jws-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kernel-module-management-hub/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kiali-ossm/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logic-operator-rhel8/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lvms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mcg-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mta-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mtv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-client-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-csi-addons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-multicluster-orchestrator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odf-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odr-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/bundle-v1.15.0.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/channel.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-cert-manager-operator/package.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-custom-metrics-autoscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-pipelines-operator-rh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-secondary-scheduler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-bridge-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/quay-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/recipe/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/red-hat-hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redhat-oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rh-service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhacs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhbk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhdh/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhods-prometheus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhpam-kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhsso-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rook-ceph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/run-once-duration-override-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sandboxed-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/security-profiles-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/serverless-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-registry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/servicemeshoperator3/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/submariner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tang-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustee-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volsync-product/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/catalog/web-terminal/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc 
restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/bc8d0691 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/6b76097a not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-utilities/34d1af30 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/312ba61c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc 
restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/645d5dd1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/extract-content/16e825f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/4cf51fc9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/2a23d348 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/containers/registry-server/075dbd49 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/..2025_02_24_06_09_13.3521195566/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes/kubernetes.io~configmap/serviceca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 00:07:06 
crc restorecon[4695]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/dd585ddd not reset as customized by admin to system_u:object_r:container_file_t:s0:c377,c642 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/17ebd0ab not reset as customized by admin to system_u:object_r:container_file_t:s0:c338,c343 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/containers/node-ca/005579f4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c842,c986 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_23_05_23_11.449897510/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_23_05_23_11.1287037894 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..2025_02_23_05_23_11.1301053334/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 
00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes/kubernetes.io~configmap/audit-policies/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/bf5f3b9c not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/af276eb7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/fix-audit-permissions/ea28e322 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/692e6683 not reset as customized by admin to system_u:object_r:container_file_t:s0:c49,c263 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/871746a7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c701 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/containers/oauth-apiserver/4eb2e958 not reset as customized by admin to system_u:object_r:container_file_t:s0:c764,c897 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..2025_02_24_06_09_06.2875086261/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/console-config/console-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_09_06.286118152/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..2025_02_24_06_09_06.3865795478/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/oauth-serving-cert/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..2025_02_24_06_09_06.584414814/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/etc-hosts not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/ca9b62da not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/containers/console/0edd6fce not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c25 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.2406383837/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.openshift-global-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/config/openshift-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.1071801880/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877 not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..2025_02_24_06_20_07.2494444877/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes/kubernetes.io~configmap/proxy-ca-bundles/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/containers/controller-manager/89b4555f not reset as customized by admin to system_u:object_r:container_file_t:s0:c14,c22 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..2025_02_23_05_23_22.4071100442/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes/kubernetes.io~configmap/config-volume/Corefile not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/655fcd71 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/0d43c002 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/dns/e68efd17 not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/9acf9b65 not reset as customized by admin to system_u:object_r:container_file_t:s0:c457,c841 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/5ae3ff11 not reset as customized by admin to system_u:object_r:container_file_t:s0:c55,c1022 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/containers/kube-rbac-proxy/1e59206a not reset as customized by admin to system_u:object_r:container_file_t:s0:c466,c972 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/27af16d1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c304,c1017 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/7918e729 not reset as customized by admin to system_u:object_r:container_file_t:s0:c853,c893 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/containers/dns-node-resolver/5d976d0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c585,c981 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..2025_02_23_05_38_56.1112187283/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/config/controller-config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_23_05_38_56.2839772658/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes/kubernetes.io~configmap/trusted-ca/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/d7f55cbb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/f0812073 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/1a56cbeb not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/7fdd437e not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/containers/console-operator/cdfb5652 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c25 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..2025_02_24_06_17_29.3844392896/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/etcd-serving-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..2025_02_24_06_17_29.848549803/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..2025_02_24_06_17_29.780046231/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/audit/policy.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc..5000 not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..2025_02_24_06_17_29.2926008347/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/image-import-ca/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..2025_02_24_06_17_29.2729721485/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes/kubernetes.io~configmap/trusted-ca-bundle/tls-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/fix-audit-permissions/fb93119e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver/f1e8fc0e not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/containers/openshift-apiserver-check-endpoints/218511f3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c336,c787 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes/kubernetes.io~empty-dir/tmpfs/k8s-webhook-server/serving-certs not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/ca8af7b3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/72cc8a75 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/containers/packageserver/6e8a3760 not reset as customized by admin to system_u:object_r:container_file_t:s0:c12,c18 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..2025_02_23_05_27_30.557428972/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes/kubernetes.io~configmap/service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4c3455c0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c5,c6 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/2278acb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/4b453e4f not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/containers/cluster-version-operator/3ec09bda not reset as customized by admin to system_u:object_r:container_file_t:s0:c5,c6 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..2025_02_24_06_25_03.422633132/anchors/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/trusted-ca/anchors not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..2025_02_24_06_25_03.3594477318/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/image-registry.openshift-image-registry.svc.cluster.local..5000 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~configmap/registry-certificates/default-route-openshift-image-registry.apps-crc.testing not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/edk2/cacerts.bin not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/java/cacerts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/openssl/ca-bundle.trust.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/tls-ca-bundle.pem not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/email-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/objsign-ca-bundle.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2ae6433e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fde84897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75680d2e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/openshift-service-serving-signer_1740288168.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/facfc4fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f5a969c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CFCA_EV_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9ef4a08a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ingress-operator_1740288202.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2f332aed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/248c8271.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d10a21f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ACCVRAIZ1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a94d09e5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c9a4d3b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40193066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd8c0d63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b936d1c6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CA_Disig_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4fd49c6c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AC_RAIZ_FNMT-RCM_SERVIDORES_SEGUROS.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b81b93f0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f9a69fa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b30d5fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ANF_Secure_Server_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b433981b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93851c9e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9282e51c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7dd1bc4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Actalis_Authentication_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/930ac5d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f47b495.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e113c810.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5931b5bc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Commercial.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2b349938.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e48193cf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/302904dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a716d4ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Networking.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/93bc0acc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/86212b19.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certigna_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b727005e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbc54cab.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f51bb24c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c28a8a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AffirmTrust_Premium_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9c8dfbd4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ccc52f49.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cb1c3204.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ce5e74ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd08c599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6d41d539.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb5fa911.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e35234b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8cb5ee0f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a7c655d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f8fc53da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Amazon_Root_CA_4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/de6d66f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d41b5e2a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/41a3f684.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1df5a75f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_2011.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e36a6752.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b872f2b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9576d26b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/228f89db.0 
not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_ECC_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fb717492.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d21b73c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b1b94ef.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/595e996b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Atos_TrustedRoot_Root_CA_RSA_TLS_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b46e03d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/128f4b91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_3_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81f2d2b1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Autoridad_de_Certificacion_Firmaprofesional_CIF_A62634068.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3bde41ac.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d16a5865.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_EC-384_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0179095f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ffa7f1eb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9482e63a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4dae3dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/BJCA_Global_Root_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e359ba6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7e067d03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/95aff9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7746a63.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Baltimore_CyberTrust_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/653b494a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3ad48a91.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Buypass_Class_2_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/54657681.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/82223c44.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8de2f56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2d9dafe4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d96b65e2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee64a828.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/40547a79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5a3f0ff8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a780d93.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/34d996fb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/eed8c118.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/89c02a45.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b1159c4c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/COMODO_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d6325660.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d4c339cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8312c4c1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certainly_Root_E1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8508e720.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5fdd185d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48bec511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/69105f4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0b9bc432.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Certum_Trusted_Network_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/32888f65.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b03dec0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/219d9499.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_ECC_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5acf816d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbf06781.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-01.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc99f41e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/CommScope_Public_Trust_RSA_Root-02.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/AAA_Certificate_Services.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/985c1f52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8794b4e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_BR_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e7c037b4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ef954a4e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_EV_Root_CA_1_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2add47b6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/90c5a3c8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0f3e76e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/53a1b57a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/D-TRUST_Root_Class_3_CA_2_EV_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5ad8a5d6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/68dd7389.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d04f354.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d6437c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/062cdee6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bd43e1dd.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Assured_ID_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7f3d5d1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c491639e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3513523f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/399e7759.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/feffd413.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d18e9066.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/607986c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c90bc37d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1b0f7e5c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e08bfd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Global_Root_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dd8e9d41.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed39abd0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a3418fda.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bc3f2570.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_High_Assurance_EV_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/244b5494.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/81b9768f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4be590e0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_ECC_P384_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9846683b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/252252d2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e8e7201.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_TLS_RSA4096_Root_G5.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d52c538d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c44cc0c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/DigiCert_Trusted_Root_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/75d1b2ed.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a2c66da8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ecccd8db.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust.net_Certification_Authority__2048_.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/aee5f10d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3e7271e8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0e59380.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4c3982f2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b99d060.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf64f35b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0a775a30.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/002c0b4f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cc450945.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_EC1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/106f3e4d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b3fb433b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GlobalSign.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4042bcee.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/02265526.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/455f1b52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0d69c7e1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9f727ac7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Entrust_Root_Certification_Authority_-_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5e98733a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0cd152c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dc4d6a89.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6187b673.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/FIRMAPROFESIONAL_CA_ROOT-A_WEB.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ba8887ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/068570d1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f081611a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/48a195d8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GDCA_TrustAUTH_R5_ROOT.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f6fa695.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab59055e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b92fd57f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GLOBALTRUST_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fa5da96b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ec40989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7719f463.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/GTS_Root_R1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1001acf7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f013ecaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/626dceaf.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c559d742.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1d3472b9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9479c8c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a81e292b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4bfab552.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_E46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Go_Daddy_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e071171e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/57bcb2da.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_ECC_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ab5346f4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5046c355.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HARICA_TLS_RSA_Root_CA_2021.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/865fbdf9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da0cfd1d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/85cde254.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_ECC_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cbb3f32b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureSign_RootCA11.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hellenic_Academic_and_Research_Institutions_RootCA_2015.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5860aaa6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/31188b5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/HiPKI_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c7f1359b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f15c80c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Hongkong_Post_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/09789157.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ISRG_Root_X2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/18856ac4.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e09d511.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Commercial_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cf701eeb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d06393bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/IdenTrust_Public_Sector_Root_CA_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/10531352.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Izenpe.com.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SecureTrust_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b0ed035a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsec_e-Szigno_Root_CA_2009.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8160b96c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e8651083.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2c63f966.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_ECC_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d89cda1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/01419da9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_RSA_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7a5b843.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Microsoft_RSA_Root_Certificate_Authority_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bf53fb88.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9591a472.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3afde786.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Gold_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NAVER_Global_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3fb36b73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d39b0a2c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a89d74c2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/cd58d51e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b7db1890.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/NetLock_Arany__Class_Gold__F__tan__s__tv__ny.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/988a38cb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/60afe812.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f39fc864.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5443e9e3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GB_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e73d606e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dfc0fe80.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b66938e9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1e1eab7c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/OISTE_WISeKey_Global_Root_GC_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/773e07ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c899c73.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d59297b8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ddcda989.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_1_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/749e9e03.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/52b525c7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_RootCA3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d7e8dc79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a819ef2.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/08063a00.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6b483515.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_2_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/064e0aa9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1f58a078.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6f7454b3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7fa05551.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76faf6c0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9339512a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f387163d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee37c333.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/QuoVadis_Root_CA_3_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e18bfb83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e442e424.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fe8a2cd8.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/23f4c490.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5cd81ad7.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f0c70a8d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7892ad52.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SZAFIR_ROOT_CA2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4f316efb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_EV_Root_Certification_Authority_RSA_R2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/06dc52d5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/583d0756.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Sectigo_Public_Server_Authentication_Root_R46.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_ECC.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0bf05006.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/88950faa.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9046744a.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/3c860d51.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_Root_Certification_Authority_RSA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/6fa5da56.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/33ee480d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Secure_Global_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/63a2c897.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SSL.com_TLS_ECC_Root_CA_2022.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/bdacca6f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ff34af3f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/dbff3a01.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Security_Communication_ECC_RootCA1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_C1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Class_2_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/406c9bb1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_C3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Starfield_Services_Root_Certificate_Authority_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/SwissSign_Silver_CA_-_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/99e1b953.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/T-TeleSec_GlobalRoot_Class_3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/14bc7599.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TUBITAK_Kamu_SM_SSL_Kok_Sertifikasi_-_Surum_1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Global_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/7a3adc42.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TWCA_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f459871d.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_ECC_Root_2020.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_Root_CA_-_G1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telekom_Security_TLS_RSA_Root_2023.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TeliaSonera_Root_CA_v1.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Telia_Root_CA_v2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8f103249.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f058632f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-certificates.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9bf03295.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/98aaf404.0 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TrustAsia_Global_Root_CA_G4.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1cef98f5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/073bfcc5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/2923b3f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f249de83.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/edcbddb5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/emSign_ECC_Root_CA_-_G3.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P256_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9b5697b0.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/1ae85e5e.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/b74d2bd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 
00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/Trustwave_Global_ECC_P384_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/d887a5bb.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9aef356c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/TunTrust_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fd64f3fc.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e13665f9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Extended_Validation_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/0f5dc4f3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/da7377f6.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/UCA_Global_G2_Root.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/c01eb047.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/304d27c3.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ed858448.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_ECC_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/f30dd6ad.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/04f60c28.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/vTrus_ECC_Root_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/USERTrust_RSA_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/fc5a8f99.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/35105088.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ee532fd5.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/XRamp_Global_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/706f604c.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/76579174.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/8d86cdd1.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/882de061.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/certSIGN_ROOT_CA_G2.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/5f618aec.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/a9d40e02.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e-Szigno_Root_CA_2017.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/e868b802.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/83e9984f.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ePKI_Root_Certification_Authority.pem not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/ca6e4ad9.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/9d6523ce.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/4b718d9b.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes/kubernetes.io~empty-dir/ca-trust-extracted/pem/directory-hash/869fbf79.0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/containers/registry/f8d22bdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c10,c16 Nov 26 00:07:06 crc 
restorecon[4695]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/6e8bbfac not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/54dd7996 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator/a4f1bb05 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/207129da not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/c1df39e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/containers/cluster-samples-operator-watch/15b8f1cd not reset as customized by admin to system_u:object_r:container_file_t:s0:c9,c17 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3523263858/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..2025_02_23_05_27_49.3256605594/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes/kubernetes.io~configmap/images/images.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/77bd6913 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/2382c1b1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/kube-rbac-proxy/704ce128 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/70d16fe0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/bfb95535 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/containers/machine-api-operator/57a8e8e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c0,c15 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..2025_02_23_05_27_49.3413793711/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/1b9d3e5e not reset as customized by admin to system_u:object_r:container_file_t:s0:c107,c917 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/fddb173c not reset as customized by admin to system_u:object_r:container_file_t:s0:c202,c983 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/containers/kube-apiserver-operator/95d3c6c4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c219,c404 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/bfb5fff5 not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/2aef40aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/9d751cbb-f2e2-430d-9754-c882a5e924a5/containers/check-endpoints/c0391cad not reset as customized by admin to system_u:object_r:container_file_t:s0:c20,c21 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/1119e69d not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/660608b4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager/8220bd53 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/85f99d5c not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/cluster-policy-controller/4b0225f6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/9c2a3394 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-cert-syncer/e820b243 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/1ca52ea0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c776,c1007 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/f614b9022728cf315e60c057852e563e/containers/kube-controller-manager-recovery-controller/e6988e45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c214,c928 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes/kubernetes.io~configmap/mcc-auth-proxy-config/..2025_02_24_06_09_21.2517297950/config-file.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/6655f00b not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/98bc3986 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/machine-config-controller/08e3458a not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/2a191cb0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/6c4eeefb not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/containers/kube-rbac-proxy/f61a549c not reset as customized by admin to system_u:object_r:container_file_t:s0:c4,c17 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/24891863 not reset as customized by admin to system_u:object_r:container_file_t:s0:c37,c572 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/hostpath-provisioner/fbdfd89c not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/9b63b3bc not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c37,c572 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/liveness-probe/8acde6d6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/node-driver-registrar/59ecbba3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/containers/csi-provisioner/685d4be3 not reset as customized by admin to system_u:object_r:container_file_t:s0:c318,c553 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..2025_02_24_06_20_07.341639300/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/config.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.client-ca.configmap not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/config/openshift-route-controller-manager.serving-cert.secret not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851 not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..2025_02_24_06_20_07.2950937851/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes/kubernetes.io~configmap/client-ca/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/containers/route-controller-manager/feaea55e not reset as customized by admin to system_u:object_r:container_file_t:s0:c2,c23 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abinitio-runtime-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/accuknox-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aci-containers-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airlock-microgateway/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ako-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloy/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anchore-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 
00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-cloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/appdynamics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-dcap-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ccm-node-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cfm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cilium-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloud-native-postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudera-streams-messaging-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudnative-pg/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cnfv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/conjur-follower-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/coroot-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cte-k8s-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-deploy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/digitalai-release-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edb-hcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/elasticsearch-eck-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/federatorai-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fujitsu-enterprise-postgres-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/function-mesh/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/harness-gitops-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hcp-terraform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hpe-ezmeral-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-application-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-directory-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-dr-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-licensing-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infoscale-sds-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infrastructure-asset-orchestrator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-device-plugins-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/intel-kubernetes-power-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-openshift-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8s-triliovault/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-ati-updates/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-framework/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-ingress/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-licensing/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-kcos-sso/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-load-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-loadcore-agents/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nats-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-nimbusmosaic-dusim/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-rest-api-browser-v1/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-appsec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-db/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-diagnostics/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-logging/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-migration/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-msg-broker/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-notifications/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-stats-dashboards/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-storage/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-test-core/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-wap-ui/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keysight-websocket-service/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kong-gateway-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubearmor-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lenovo-locd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memcached-operator-ogaye/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/memory-machine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-enterprise/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netapp-spark-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-adm-agent-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netscaler-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-repository-ha-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nginx-ingress-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nim-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxiq-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nxrm-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odigos-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/open-liberty-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftartifactoryha-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshiftxray-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/operator-certification-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pmem-csi-operator-os/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-component-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/runtime-fabric-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sanstoragecsi-operator-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/smilecdr-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sriov-fec/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-commons-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stackable-zookeeper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-tsc-client-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tawon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tigera-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-secrets-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vcp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/webotx-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/63709497 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/d966b7fd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-utilities/f5773757 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/81c9edb9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/57bf57ee not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/extract-content/86f5e6aa not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/0aabe31d not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/d2af85c2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/containers/registry-server/09d157d9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/3scale-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acm-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-acmpca-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigateway-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-apigatewayv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-applicationautoscaling-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-athena-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudfront-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudtrail-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatch-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-cloudwatchlogs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-documentdb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-dynamodb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ec2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecr-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ecs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-efs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eks-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elasticache-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-elbv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller not reset 
as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-emrcontainers-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-eventbridge-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-iam-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kafka-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-keyspaces-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kinesis-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-kms-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-lambda-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-memorydb-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-mq-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-networkfirewall-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-opensearchservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-organizations-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-pipes-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-prometheusservice-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-rds-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-recyclebin-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-route53resolver-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-s3-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sagemaker-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-secretsmanager-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ses-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sfn-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sns-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-sqs-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-ssm-controller/catalog.json not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ack-wafv2-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/airflow-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alloydb-omni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/alvearie-imaging-ingestion/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/amd-gpu-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/analytics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/annotationlab/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicast-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-api-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurio-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apicurito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/apimatic-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator not 
reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/application-services-metering-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aqua/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/argocd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/assisted-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/authorino-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/automotive-infra/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aws-efs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/awss3-operator-registry/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/azure-service-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/beegfs-csi-driver-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/bpfman-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-k/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/camel-karavan-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cass-operator-community/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cert-utils-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-aas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-impairment-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cluster-manager/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/codeflare-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-kubevirt-hyperconverged/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-trivy-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/community-windows-machine-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/customized-user-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cxl-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dapr-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datatrucker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dbaas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/debezium-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dell-csm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/deployment-validation-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/devopsinabox/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-amlen-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eclipse-che/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ecr-secret-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/edp-keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eginnovations-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/egressip-ipam-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ember-csi-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/etcd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/eventing-kogito/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/external-secrets-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/falcon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation not reset as customized by 
admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fence-agents-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flink-kubernetes-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k8gb/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/fossul-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/github-arc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitops-primer/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/gitwebhook-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/global-load-balancer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/grafana-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/group-sync-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hawtio-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hazelcast-platform-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hedvig-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hive-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/horreum-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/hyperfoil-bundle/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-block-csi-operator-community/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-security-verify-access-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibm-spectrum-scale-csi-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ibmcloud-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/infinispan/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/integrity-shield-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ipfs-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/istio-workspace-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/jaeger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kaoto-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 
00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keda/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keepalived-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/keycloak-permissions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/klusterlet/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kogito-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/koku-metrics-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/konveyor-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/korrel8r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kuadrant-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kube-green/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubecost/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubernetes-imagepuller-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/l5-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/layer7-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lbconfig-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/lib-bucket-provisioner/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/limitador-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/logging-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-helm-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/loki-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/machine-deletion-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mariadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marin3r/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mercury-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/microcks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-atlas-kubernetes/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/mongodb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/move2kube-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multi-nic-cni-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-global-hub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/multicluster-operators-subscription/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/must-gather-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/namespace-configuration-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ncn-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ndmspc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/netobserv-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-community-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nexus-operator-m88i/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nfs-provisioner-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nlp-server/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-discovery-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-healthcheck-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/node-maintenance-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/nsm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oadp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/observability-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/oci-ccm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ocm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/odoo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opendatahub-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openebs/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-nfd-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-node-upgrade-mutex-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/openshift-qiskit-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/opentelemetry-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patch-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/patterns-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pcc-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pelorus-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/percona-xtradb-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/portworx-essentials/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/postgresql/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/proactive-node-scaling-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/project-quay/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus/catalog.json not reset as customized by admin 
to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometheus-exporter-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/prometurbo/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pubsubplus-eventbroker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pulp-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-cluster-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rabbitmq-messaging-topology-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/reportportal-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/resource-locker-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/rhoas-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ripsaw/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sailoperator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-commerce-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-data-intelligence-observer-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sap-hana-express-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/self-node-remediation/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/service-binding-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/shipwright-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sigstore-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/silicom-sts-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/skupper-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snapscheduler/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/snyk-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/socmmd/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonar-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosivio/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc 
restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sonataflow-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/sosreport-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/spark-helm-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/special-resource-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/stolostron-engine/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/strimzi-kafka-operator/catalog.json not reset as 
customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/syndesis/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tagger/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tempo-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tf-controller/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/tidb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trident-operator/catalog.json not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/trustify-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ucs-ci-solutions-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/universal-crossplane/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/varnish-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vault-config-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/verticadb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/volume-expander-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/wandb-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/windup-operator/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yaks/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c0fe7256 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/c30319e4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-utilities/e6b1dd45 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/2bb643f0 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/920de426 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/extract-content/70fa1e87 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/a1c12a2f not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/9442e6c7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/containers/registry-server/5b45ec72 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/abot-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aerospike-kubernetes-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/aikit-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzo-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzograph-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/anzounstructured-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cloudbees-ci-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/cockroachdb-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/crunchy-postgres-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/datadog-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/dynatrace-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/entando-k8s-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/flux/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/instana-agent-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/iomesh-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/joget-dx8-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/k10-kasten-operator-term-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubemq-operator-marketplace-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/kubeturbo-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/linstor-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/marketplace-games-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/model-builder-for-vision-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/neuvector-certified-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/ovms-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/pachyderm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/redis-enterprise-operator-cert-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/seldon-deploy-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-paygo-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/starburst-enterprise-helm-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/t8c-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/timemachine-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/vfunction-server-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/xcrypt-operator-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/yugabyte-platform-operator-bundle-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/catalog/zabbix-operator-certified-rhmp/catalog.json not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/00000-1.psg.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/db.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/index.pmt not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/main.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/db/overflow.pix not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/catalog-content/cache/pogreb.v1/digest not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes/kubernetes.io~empty-dir/utilities/copy-content not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/3c9f3a59 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/1091c11b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-utilities/9a6821c6 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/ec0c35e2 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/517f37e7 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/extract-content/6214fe78 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/ba189c8b not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/351e4f31 not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/containers/registry-server/c0f219ff not reset as customized by admin to system_u:object_r:container_file_t:s0:c7,c13 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/etc-hosts not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/8069f607 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/559c3d82 not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/wait-for-host-port/605ad488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/148df488 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/3bf6dcb4 not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c133,c223 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler/022a2feb not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/938c3924 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/729fe23e not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-cert-syncer/1fd5cbd4 not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/a96697e1 not reset as customized by admin to system_u:object_r:container_file_t:s0:c378,c723 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/e155ddca not reset as customized by admin to system_u:object_r:container_file_t:s0:c133,c223 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/3dcd261975c3d6b9a6ad6367fd4facd3/containers/kube-scheduler-recovery-controller/10dd0e0f not reset as customized by admin to system_u:object_r:container_file_t:s0:c247,c522 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..2025_02_24_06_09_35.3018472960/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-trusted-ca-bundle/ca-bundle.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 00:07:06 crc restorecon[4695]: 
/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..2025_02_24_06_09_35.4262376737/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/audit-policies/audit.yaml not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..2025_02_24_06_09_35.2630275752/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-cliconfig/v4-0-config-system-cliconfig not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..2025_02_24_06_09_35.2376963788/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/..data not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes/kubernetes.io~configmap/v4-0-config-system-service-ca/service-ca.crt not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/etc-hosts not reset as customized by admin to 
system_u:object_r:container_file_t:s0:c682,c947 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/6f2c8392 not reset as customized by admin to system_u:object_r:container_file_t:s0:c267,c588 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/containers/oauth-openshift/bd241ad9 not reset as customized by admin to system_u:object_r:container_file_t:s0:c682,c947 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/plugins not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/plugins/csi-hostpath not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/plugins/csi-hostpath/csi.sock not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/plugins/kubernetes.io not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/plugins/kubernetes.io/csi not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983 not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/vol_data.json not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 00:07:06 crc restorecon[4695]: /var/lib/kubelet/plugins_registry not reset as customized by admin to system_u:object_r:container_file_t:s0 Nov 26 00:07:06 crc restorecon[4695]: Relabeled /var/usrlocal/bin/kubenswrapper from system_u:object_r:bin_t:s0 to system_u:object_r:kubelet_exec_t:s0 Nov 26 00:07:07 crc kubenswrapper[4875]: Flag --container-runtime-endpoint has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 26 00:07:07 crc kubenswrapper[4875]: Flag --minimum-container-ttl-duration has been deprecated, Use --eviction-hard or --eviction-soft instead. Will be removed in a future version. Nov 26 00:07:07 crc kubenswrapper[4875]: Flag --volume-plugin-dir has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 26 00:07:07 crc kubenswrapper[4875]: Flag --register-with-taints has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. 
Nov 26 00:07:07 crc kubenswrapper[4875]: Flag --pod-infra-container-image has been deprecated, will be removed in a future release. Image garbage collector will get sandbox image information from CRI. Nov 26 00:07:07 crc kubenswrapper[4875]: Flag --system-reserved has been deprecated, This parameter should be set via the config file specified by the Kubelet's --config flag. See https://kubernetes.io/docs/tasks/administer-cluster/kubelet-config-file/ for more information. Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.233820 4875 server.go:211] "--pod-infra-container-image will not be pruned by the image garbage collector in kubelet and should also be set in the remote runtime" Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.242646 4875 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.242677 4875 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.242686 4875 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.242698 4875 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.242707 4875 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.242716 4875 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.242725 4875 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.242780 4875 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.242790 4875 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.242797 4875 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.242812 4875 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.242820 4875 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.242829 4875 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.242836 4875 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.242844 4875 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.242853 4875 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.242860 4875 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.242868 4875 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.242875 4875 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.242883 4875 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.242890 
4875 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.242898 4875 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.242906 4875 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.242913 4875 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.242921 4875 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.242928 4875 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.242936 4875 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.242943 4875 feature_gate.go:330] unrecognized feature gate: Example Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.242951 4875 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.242958 4875 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.242966 4875 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.242976 4875 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.242985 4875 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.242992 4875 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.243000 4875 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.243010 4875 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.243020 4875 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.243028 4875 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.243037 4875 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.243045 4875 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.243052 4875 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.243060 4875 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.243068 4875 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.243079 4875 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.243088 4875 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.243098 4875 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.243105 4875 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.243113 4875 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.243121 4875 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.243128 4875 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.243136 4875 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.243146 4875 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.243155 4875 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.243164 4875 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.243173 4875 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.243181 4875 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.243190 4875 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.243200 4875 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.243211 4875 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.243219 4875 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.243226 4875 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.243234 4875 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.243242 4875 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.243250 4875 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.243258 4875 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.243266 4875 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.243276 4875 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.243285 4875 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.243293 4875 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 26 00:07:07 crc kubenswrapper[4875]: 
W1126 00:07:07.243302 4875 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.243309 4875 feature_gate.go:330] unrecognized feature gate: ManagedBootImages
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243501 4875 flags.go:64] FLAG: --address="0.0.0.0"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243520 4875 flags.go:64] FLAG: --allowed-unsafe-sysctls="[]"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243536 4875 flags.go:64] FLAG: --anonymous-auth="true"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243547 4875 flags.go:64] FLAG: --application-metrics-count-limit="100"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243559 4875 flags.go:64] FLAG: --authentication-token-webhook="false"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243569 4875 flags.go:64] FLAG: --authentication-token-webhook-cache-ttl="2m0s"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243583 4875 flags.go:64] FLAG: --authorization-mode="AlwaysAllow"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243594 4875 flags.go:64] FLAG: --authorization-webhook-cache-authorized-ttl="5m0s"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243604 4875 flags.go:64] FLAG: --authorization-webhook-cache-unauthorized-ttl="30s"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243613 4875 flags.go:64] FLAG: --boot-id-file="/proc/sys/kernel/random/boot_id"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243622 4875 flags.go:64] FLAG: --bootstrap-kubeconfig="/etc/kubernetes/kubeconfig"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243631 4875 flags.go:64] FLAG: --cert-dir="/var/lib/kubelet/pki"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243640 4875 flags.go:64] FLAG: --cgroup-driver="cgroupfs"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243650 4875 flags.go:64] FLAG: --cgroup-root=""
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243658 4875 flags.go:64] FLAG: --cgroups-per-qos="true"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243667 4875 flags.go:64] FLAG: --client-ca-file=""
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243675 4875 flags.go:64] FLAG: --cloud-config=""
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243684 4875 flags.go:64] FLAG: --cloud-provider=""
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243693 4875 flags.go:64] FLAG: --cluster-dns="[]"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243703 4875 flags.go:64] FLAG: --cluster-domain=""
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243712 4875 flags.go:64] FLAG: --config="/etc/kubernetes/kubelet.conf"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243721 4875 flags.go:64] FLAG: --config-dir=""
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243730 4875 flags.go:64] FLAG: --container-hints="/etc/cadvisor/container_hints.json"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243742 4875 flags.go:64] FLAG: --container-log-max-files="5"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243753 4875 flags.go:64] FLAG: --container-log-max-size="10Mi"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243762 4875 flags.go:64] FLAG: --container-runtime-endpoint="/var/run/crio/crio.sock"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243771 4875 flags.go:64] FLAG: --containerd="/run/containerd/containerd.sock"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243780 4875 flags.go:64] FLAG: --containerd-namespace="k8s.io"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243789 4875 flags.go:64] FLAG: --contention-profiling="false"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243799 4875 flags.go:64] FLAG: --cpu-cfs-quota="true"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243809 4875 flags.go:64] FLAG: --cpu-cfs-quota-period="100ms"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243819 4875 flags.go:64] FLAG: --cpu-manager-policy="none"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243830 4875 flags.go:64] FLAG: --cpu-manager-policy-options=""
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243842 4875 flags.go:64] FLAG: --cpu-manager-reconcile-period="10s"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243851 4875 flags.go:64] FLAG: --enable-controller-attach-detach="true"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243860 4875 flags.go:64] FLAG: --enable-debugging-handlers="true"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243869 4875 flags.go:64] FLAG: --enable-load-reader="false"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243878 4875 flags.go:64] FLAG: --enable-server="true"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243886 4875 flags.go:64] FLAG: --enforce-node-allocatable="[pods]"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243898 4875 flags.go:64] FLAG: --event-burst="100"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243907 4875 flags.go:64] FLAG: --event-qps="50"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243916 4875 flags.go:64] FLAG: --event-storage-age-limit="default=0"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243925 4875 flags.go:64] FLAG: --event-storage-event-limit="default=0"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243934 4875 flags.go:64] FLAG: --eviction-hard=""
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243945 4875 flags.go:64] FLAG: --eviction-max-pod-grace-period="0"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243953 4875 flags.go:64] FLAG: --eviction-minimum-reclaim=""
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243962 4875 flags.go:64] FLAG: --eviction-pressure-transition-period="5m0s"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243971 4875 flags.go:64] FLAG: --eviction-soft=""
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243980 4875 flags.go:64] FLAG: --eviction-soft-grace-period=""
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243989 4875 flags.go:64] FLAG: --exit-on-lock-contention="false"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.243997 4875 flags.go:64] FLAG: --experimental-allocatable-ignore-eviction="false"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244007 4875 flags.go:64] FLAG: --experimental-mounter-path=""
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244016 4875 flags.go:64] FLAG: --fail-cgroupv1="false"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244027 4875 flags.go:64] FLAG: --fail-swap-on="true"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244044 4875 flags.go:64] FLAG: --feature-gates=""
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244068 4875 flags.go:64] FLAG: --file-check-frequency="20s"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244081 4875 flags.go:64] FLAG: --global-housekeeping-interval="1m0s"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244093 4875 flags.go:64] FLAG: --hairpin-mode="promiscuous-bridge"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244104 4875 flags.go:64] FLAG: --healthz-bind-address="127.0.0.1"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244115 4875 flags.go:64] FLAG: --healthz-port="10248"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244126 4875 flags.go:64] FLAG: --help="false"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244136 4875 flags.go:64] FLAG: --hostname-override=""
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244147 4875 flags.go:64] FLAG: --housekeeping-interval="10s"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244158 4875 flags.go:64] FLAG: --http-check-frequency="20s"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244169 4875 flags.go:64] FLAG: --image-credential-provider-bin-dir=""
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244179 4875 flags.go:64] FLAG: --image-credential-provider-config=""
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244189 4875 flags.go:64] FLAG: --image-gc-high-threshold="85"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244200 4875 flags.go:64] FLAG: --image-gc-low-threshold="80"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244213 4875 flags.go:64] FLAG: --image-service-endpoint=""
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244224 4875 flags.go:64] FLAG: --kernel-memcg-notification="false"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244235 4875 flags.go:64] FLAG: --kube-api-burst="100"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244246 4875 flags.go:64] FLAG: --kube-api-content-type="application/vnd.kubernetes.protobuf"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244257 4875 flags.go:64] FLAG: --kube-api-qps="50"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244269 4875 flags.go:64] FLAG: --kube-reserved=""
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244280 4875 flags.go:64] FLAG: --kube-reserved-cgroup=""
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244290 4875 flags.go:64] FLAG: --kubeconfig="/var/lib/kubelet/kubeconfig"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244302 4875 flags.go:64] FLAG: --kubelet-cgroups=""
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244313 4875 flags.go:64] FLAG: --local-storage-capacity-isolation="true"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244324 4875 flags.go:64] FLAG: --lock-file=""
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244334 4875 flags.go:64] FLAG: --log-cadvisor-usage="false"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244343 4875 flags.go:64] FLAG: --log-flush-frequency="5s"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244353 4875 flags.go:64] FLAG: --log-json-info-buffer-size="0"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244367 4875 flags.go:64] FLAG: --log-json-split-stream="false"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244376 4875 flags.go:64] FLAG: --log-text-info-buffer-size="0"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244385 4875 flags.go:64] FLAG: --log-text-split-stream="false"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244394 4875 flags.go:64] FLAG: --logging-format="text"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244403 4875 flags.go:64] FLAG: --machine-id-file="/etc/machine-id,/var/lib/dbus/machine-id"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244447 4875 flags.go:64] FLAG: --make-iptables-util-chains="true"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244456 4875 flags.go:64] FLAG: --manifest-url=""
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244465 4875 flags.go:64] FLAG: --manifest-url-header=""
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244477 4875 flags.go:64] FLAG: --max-housekeeping-interval="15s"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244486 4875 flags.go:64] FLAG: --max-open-files="1000000"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244498 4875 flags.go:64] FLAG: --max-pods="110"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244506 4875 flags.go:64] FLAG: --maximum-dead-containers="-1"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244516 4875 flags.go:64] FLAG: --maximum-dead-containers-per-container="1"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244525 4875 flags.go:64] FLAG: --memory-manager-policy="None"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244534 4875 flags.go:64] FLAG: --minimum-container-ttl-duration="6m0s"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244543 4875 flags.go:64] FLAG: --minimum-image-ttl-duration="2m0s"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244552 4875 flags.go:64] FLAG: --node-ip="192.168.126.11"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244561 4875 flags.go:64] FLAG: --node-labels="node-role.kubernetes.io/control-plane=,node-role.kubernetes.io/master=,node.openshift.io/os_id=rhcos"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244581 4875 flags.go:64] FLAG: --node-status-max-images="50"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244590 4875 flags.go:64] FLAG: --node-status-update-frequency="10s"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244600 4875 flags.go:64] FLAG: --oom-score-adj="-999"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244609 4875 flags.go:64] FLAG: --pod-cidr=""
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244620 4875 flags.go:64] FLAG: --pod-infra-container-image="quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:33549946e22a9ffa738fd94b1345f90921bc8f92fa6137784cb33c77ad806f9d"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244637 4875 flags.go:64] FLAG: --pod-manifest-path=""
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244646 4875 flags.go:64] FLAG: --pod-max-pids="-1"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244655 4875 flags.go:64] FLAG: --pods-per-core="0"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244664 4875 flags.go:64] FLAG: --port="10250"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244673 4875 flags.go:64] FLAG: --protect-kernel-defaults="false"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244681 4875 flags.go:64] FLAG: --provider-id=""
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244690 4875 flags.go:64] FLAG: --qos-reserved=""
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244699 4875 flags.go:64] FLAG: --read-only-port="10255"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244708 4875 flags.go:64] FLAG: --register-node="true"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244716 4875 flags.go:64] FLAG: --register-schedulable="true"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244725 4875 flags.go:64] FLAG: --register-with-taints="node-role.kubernetes.io/master=:NoSchedule"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244739 4875 flags.go:64] FLAG: --registry-burst="10"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244748 4875 flags.go:64] FLAG: --registry-qps="5"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244757 4875 flags.go:64] FLAG: --reserved-cpus=""
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244765 4875 flags.go:64] FLAG: --reserved-memory=""
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244777 4875 flags.go:64] FLAG: --resolv-conf="/etc/resolv.conf"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244786 4875 flags.go:64] FLAG: --root-dir="/var/lib/kubelet"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244796 4875 flags.go:64] FLAG: --rotate-certificates="false"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244805 4875 flags.go:64] FLAG: --rotate-server-certificates="false"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244813 4875 flags.go:64] FLAG: --runonce="false"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244822 4875 flags.go:64] FLAG: --runtime-cgroups="/system.slice/crio.service"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244831 4875 flags.go:64] FLAG: --runtime-request-timeout="2m0s"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244842 4875 flags.go:64] FLAG: --seccomp-default="false"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244851 4875 flags.go:64] FLAG: --serialize-image-pulls="true"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244860 4875 flags.go:64] FLAG: --storage-driver-buffer-duration="1m0s"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244869 4875 flags.go:64] FLAG: --storage-driver-db="cadvisor"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244878 4875 flags.go:64] FLAG: --storage-driver-host="localhost:8086"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244887 4875 flags.go:64] FLAG: --storage-driver-password="root"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244896 4875 flags.go:64] FLAG: --storage-driver-secure="false"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244905 4875 flags.go:64] FLAG: --storage-driver-table="stats"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244914 4875 flags.go:64] FLAG: --storage-driver-user="root"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244922 4875 flags.go:64] FLAG: --streaming-connection-idle-timeout="4h0m0s"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244931 4875 flags.go:64] FLAG: --sync-frequency="1m0s"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244940 4875 flags.go:64] FLAG: --system-cgroups=""
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244949 4875 flags.go:64] FLAG: --system-reserved="cpu=200m,ephemeral-storage=350Mi,memory=350Mi"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244963 4875 flags.go:64] FLAG: --system-reserved-cgroup=""
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244972 4875 flags.go:64] FLAG: --tls-cert-file=""
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244981 4875 flags.go:64] FLAG: --tls-cipher-suites="[]"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.244992 4875 flags.go:64] FLAG: --tls-min-version=""
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.245001 4875 flags.go:64] FLAG: --tls-private-key-file=""
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.245009 4875 flags.go:64] FLAG: --topology-manager-policy="none"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.245018 4875 flags.go:64] FLAG: --topology-manager-policy-options=""
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.245027 4875 flags.go:64] FLAG: --topology-manager-scope="container" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.245036 4875 flags.go:64] FLAG: --v="2" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.245056 4875 flags.go:64] FLAG: --version="false" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.245067 4875 flags.go:64] FLAG: --vmodule="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.245078 4875 flags.go:64] FLAG: --volume-plugin-dir="/etc/kubernetes/kubelet-plugins/volume/exec" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.245087 4875 flags.go:64] FLAG: --volume-stats-agg-period="1m0s" Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245302 4875 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245313 4875 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245322 4875 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245331 4875 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245342 4875 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245358 4875 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245368 4875 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245377 4875 feature_gate.go:330] unrecognized feature gate: Example Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245385 4875 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245393 4875 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245401 4875 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245445 4875 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245453 4875 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245461 4875 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245468 4875 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245476 4875 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245484 4875 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245492 4875 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245500 4875 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245507 4875 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245515 
4875 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245525 4875 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245535 4875 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245545 4875 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245554 4875 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245563 4875 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245571 4875 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245579 4875 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245587 4875 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245595 4875 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245602 4875 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245610 4875 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245618 4875 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245625 4875 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245633 4875 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245641 4875 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245648 4875 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245662 4875 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245673 4875 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245681 4875 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245689 4875 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245699 4875 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245709 4875 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245717 4875 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245724 4875 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245731 4875 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245739 4875 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245747 4875 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245755 4875 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245762 4875 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245770 4875 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245777 4875 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245785 4875 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245793 4875 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245800 4875 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245808 4875 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245816 4875 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245823 4875 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245831 4875 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245840 4875 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245848 4875 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245856 4875 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245863 4875 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245872 4875 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245879 4875 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245886 4875 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245894 4875 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245904 4875 feature_gate.go:353] Setting GA feature gate 
CloudDualStackNodeIPs=true. It will be removed in a future release. Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245913 4875 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245926 4875 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.245937 4875 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.245950 4875 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.261495 4875 server.go:491] "Kubelet version" kubeletVersion="v1.31.5" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.261544 4875 server.go:493] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK="" Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.261693 4875 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.261712 4875 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.261723 4875 feature_gate.go:330] unrecognized feature gate: OnClusterBuild Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.261734 4875 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.261744 4875 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.261753 4875 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.261763 4875 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.261773 4875 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.261783 4875 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.261794 4875 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.261804 4875 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.261814 4875 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.261823 4875 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.261833 4875 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.261872 4875 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.261881 4875 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 
00:07:07.261895 4875 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.261910 4875 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.261923 4875 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.261933 4875 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.261943 4875 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.261953 4875 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.261965 4875 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.261975 4875 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.261984 4875 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.261995 4875 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262004 4875 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262016 4875 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262025 4875 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262035 4875 feature_gate.go:330] unrecognized feature gate: InsightsConfig Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262044 4875 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262053 4875 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262062 4875 feature_gate.go:330] unrecognized feature gate: SignatureStores Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262072 4875 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262081 4875 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262091 4875 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262100 4875 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262110 4875 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262120 4875 feature_gate.go:330] unrecognized feature gate: GatewayAPI Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262129 4875 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262142 4875 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. 
Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262156 4875 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262168 4875 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262179 4875 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262189 4875 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262202 4875 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262213 4875 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262226 4875 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262239 4875 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262250 4875 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262262 4875 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262271 4875 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262281 4875 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262292 4875 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262302 4875 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262311 4875 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262320 4875 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262329 4875 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262339 4875 feature_gate.go:330] unrecognized feature gate: Example Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262349 4875 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262358 4875 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262367 4875 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262377 4875 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262386 4875 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262396 4875 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262451 4875 feature_gate.go:330] unrecognized feature gate: 
MultiArchInstallAzure Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262461 4875 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262471 4875 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262481 4875 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262492 4875 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262502 4875 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.262520 4875 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]} Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262824 4875 feature_gate.go:330] unrecognized feature gate: OVNObservability Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262846 4875 feature_gate.go:330] unrecognized feature gate: NetworkSegmentation Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262862 4875 feature_gate.go:353] Setting GA feature gate CloudDualStackNodeIPs=true. It will be removed in a future release. Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262876 4875 feature_gate.go:330] unrecognized feature gate: VSphereControlPlaneMachineSet Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262889 4875 feature_gate.go:330] unrecognized feature gate: NutanixMultiSubnets Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262900 4875 feature_gate.go:330] unrecognized feature gate: MachineConfigNodes Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262910 4875 feature_gate.go:330] unrecognized feature gate: AzureWorkloadIdentity Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262921 4875 feature_gate.go:330] unrecognized feature gate: NetworkDiagnosticsConfig Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262931 4875 feature_gate.go:330] unrecognized feature gate: MachineAPIOperatorDisableMachineHealthCheckController Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262941 4875 feature_gate.go:330] unrecognized feature gate: PrivateHostedZoneAWS Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262950 4875 feature_gate.go:330] unrecognized feature gate: CSIDriverSharedResource Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262964 4875 feature_gate.go:353] Setting GA feature gate ValidatingAdmissionPolicy=true. It will be removed in a future release. Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262977 4875 feature_gate.go:351] Setting deprecated feature gate KMSv1=true. It will be removed in a future release. 
Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.262989 4875 feature_gate.go:330] unrecognized feature gate: MinimumKubeletVersion Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263002 4875 feature_gate.go:330] unrecognized feature gate: SigstoreImageVerification Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263012 4875 feature_gate.go:330] unrecognized feature gate: AWSClusterHostedDNS Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263022 4875 feature_gate.go:330] unrecognized feature gate: GCPLabelsTags Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263032 4875 feature_gate.go:330] unrecognized feature gate: ClusterMonitoringConfig Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263042 4875 feature_gate.go:330] unrecognized feature gate: MachineAPIMigration Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263052 4875 feature_gate.go:330] unrecognized feature gate: AlibabaPlatform Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263062 4875 feature_gate.go:330] unrecognized feature gate: BuildCSIVolumes Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263072 4875 feature_gate.go:330] unrecognized feature gate: RouteAdvertisements Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263082 4875 feature_gate.go:330] unrecognized feature gate: InsightsRuntimeExtractor Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263091 4875 feature_gate.go:330] unrecognized feature gate: BareMetalLoadBalancer Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263101 4875 feature_gate.go:330] unrecognized feature gate: HardwareSpeed Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263110 4875 feature_gate.go:330] unrecognized feature gate: PlatformOperators Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263121 4875 feature_gate.go:330] unrecognized feature gate: VSphereStaticIPs Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263131 4875 feature_gate.go:330] unrecognized feature gate: MixedCPUsAllocation Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263141 4875 feature_gate.go:330] unrecognized feature gate: PinnedImages Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263151 4875 feature_gate.go:330] unrecognized feature gate: AdminNetworkPolicy Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263161 4875 feature_gate.go:330] unrecognized feature gate: InsightsOnDemandDataGather Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263170 4875 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAWS Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263181 4875 feature_gate.go:330] unrecognized feature gate: NetworkLiveMigration Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263190 4875 feature_gate.go:330] unrecognized feature gate: AutomatedEtcdBackup Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263200 4875 feature_gate.go:330] unrecognized feature gate: AWSEFSDriverVolumeMetrics Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263210 4875 feature_gate.go:330] unrecognized feature gate: ConsolePluginContentSecurityPolicy Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263220 4875 feature_gate.go:330] unrecognized feature gate: GCPClusterHostedDNS Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263229 4875 feature_gate.go:330] unrecognized feature gate: ExternalOIDC Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263240 4875 feature_gate.go:330] unrecognized feature gate: ImageStreamImportMode Nov 26 
00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263250 4875 feature_gate.go:330] unrecognized feature gate: VSphereMultiNetworks Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263259 4875 feature_gate.go:330] unrecognized feature gate: VSphereMultiVCenters Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263269 4875 feature_gate.go:330] unrecognized feature gate: AdditionalRoutingCapabilities Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263280 4875 feature_gate.go:330] unrecognized feature gate: SetEIPForNLBIngressController Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263290 4875 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstall Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263300 4875 feature_gate.go:330] unrecognized feature gate: PersistentIPsForVirtualization Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263310 4875 feature_gate.go:330] unrecognized feature gate: EtcdBackendQuota Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263319 4875 feature_gate.go:330] unrecognized feature gate: NewOLM Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263329 4875 feature_gate.go:330] unrecognized feature gate: ManagedBootImages Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263338 4875 feature_gate.go:330] unrecognized feature gate: IngressControllerDynamicConfigurationManager Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263347 4875 feature_gate.go:330] unrecognized feature gate: UpgradeStatus Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263359 4875 feature_gate.go:330] unrecognized feature gate: VolumeGroupSnapshot Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263369 4875 feature_gate.go:330] unrecognized feature gate: NodeDisruptionPolicy Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263379 4875 feature_gate.go:330] unrecognized feature gate: DNSNameResolver Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263389 4875 feature_gate.go:330] unrecognized feature gate: BootcNodeManagement Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263399 4875 feature_gate.go:330] unrecognized feature gate: ManagedBootImagesAWS Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263450 4875 feature_gate.go:353] Setting GA feature gate DisableKubeletCloudCredentialProviders=true. It will be removed in a future release. 
Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263464 4875 feature_gate.go:330] unrecognized feature gate: VSphereDriverConfiguration
Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263476 4875 feature_gate.go:330] unrecognized feature gate: MetricsCollectionProfiles
Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263489 4875 feature_gate.go:330] unrecognized feature gate: MultiArchInstallAzure
Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263500 4875 feature_gate.go:330] unrecognized feature gate: Example
Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263509 4875 feature_gate.go:330] unrecognized feature gate: MachineAPIProviderOpenStack
Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263519 4875 feature_gate.go:330] unrecognized feature gate: OpenShiftPodSecurityAdmission
Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263529 4875 feature_gate.go:330] unrecognized feature gate: GatewayAPI
Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263538 4875 feature_gate.go:330] unrecognized feature gate: ChunkSizeMiB
Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263549 4875 feature_gate.go:330] unrecognized feature gate: InsightsConfigAPI
Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263558 4875 feature_gate.go:330] unrecognized feature gate: SignatureStores
Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263567 4875 feature_gate.go:330] unrecognized feature gate: ClusterAPIInstallIBMCloud
Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263577 4875 feature_gate.go:330] unrecognized feature gate: OnClusterBuild
Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263588 4875 feature_gate.go:330] unrecognized feature gate: InsightsConfig
Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263598 4875 feature_gate.go:330] unrecognized feature gate: MultiArchInstallGCP
Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.263609 4875 feature_gate.go:330] unrecognized feature gate: IngressControllerLBSubnetsAWS
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.263624 4875 feature_gate.go:386] feature gates: {map[CloudDualStackNodeIPs:true DisableKubeletCloudCredentialProviders:true DynamicResourceAllocation:false EventedPLEG:false KMSv1:true MaxUnavailableStatefulSet:false NodeSwap:false ProcMountType:false RouteExternalCertificate:false ServiceAccountTokenNodeBinding:false TranslateStreamCloseWebsocketRequests:false UserNamespacesPodSecurityStandards:false UserNamespacesSupport:false ValidatingAdmissionPolicy:true VolumeAttributesClass:false]}
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.265793 4875 server.go:940] "Client rotation is on, will bootstrap in background"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.274936 4875 bootstrap.go:85] "Current kubeconfig file contents are still valid, no bootstrap necessary"
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.275125 4875 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-client-current.pem".
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.278105 4875 server.go:997] "Starting client certificate rotation" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.278152 4875 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate rotation is enabled Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.280513 4875 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Certificate expiration is 2026-02-24 05:52:08 +0000 UTC, rotation deadline is 2025-11-27 08:51:34.918881978 +0000 UTC Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.280640 4875 certificate_manager.go:356] kubernetes.io/kube-apiserver-client-kubelet: Waiting 32h44m27.638247171s for next certificate rotation Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.303507 4875 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.306878 4875 dynamic_cafile_content.go:161] "Starting controller" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.330798 4875 log.go:25] "Validated CRI v1 runtime API" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.372868 4875 log.go:25] "Validated CRI v1 image API" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.376635 4875 server.go:1437] "Using cgroup driver setting received from the CRI runtime" cgroupDriver="systemd" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.384311 4875 fs.go:133] Filesystem UUIDs: map[0b076daa-c26a-46d2-b3a6-72a8dbc6e257:/dev/vda4 2025-11-26-00-01-50-00:/dev/sr0 7B77-95E7:/dev/vda2 de0497b0-db1b-465a-b278-03db02455c71:/dev/vda3] Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.384392 4875 fs.go:134] Filesystem partitions: map[/dev/shm:{mountpoint:/dev/shm major:0 minor:22 fsType:tmpfs blockSize:0} /dev/vda3:{mountpoint:/boot major:252 minor:3 fsType:ext4 blockSize:0} /dev/vda4:{mountpoint:/var major:252 minor:4 fsType:xfs blockSize:0} /run:{mountpoint:/run major:0 minor:24 fsType:tmpfs blockSize:0} /run/user/1000:{mountpoint:/run/user/1000 major:0 minor:42 fsType:tmpfs blockSize:0} /tmp:{mountpoint:/tmp major:0 minor:30 fsType:tmpfs blockSize:0} /var/lib/etcd:{mountpoint:/var/lib/etcd major:0 minor:43 fsType:tmpfs blockSize:0}] Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.421844 4875 manager.go:217] Machine: {Timestamp:2025-11-26 00:07:07.415793512 +0000 UTC m=+0.605769247 CPUVendorID:AuthenticAMD NumCores:12 NumPhysicalCores:1 NumSockets:12 CpuFrequency:2799998 MemoryCapacity:33654128640 SwapCapacity:0 MemoryByType:map[] NVMInfo:{MemoryModeCapacity:0 AppDirectModeCapacity:0 AvgPowerBudget:0} HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] MachineID:21801e6708c44f15b81395eb736a7cec SystemUUID:9f6d4e71-289a-40d7-bad1-b955dbea75de BootID:e98cdd5c-4f13-4403-b492-aac02592afb8 Filesystems:[{Device:/var/lib/etcd DeviceMajor:0 DeviceMinor:43 Capacity:1073741824 Type:vfs Inodes:4108170 HasInodes:true} {Device:/dev/shm DeviceMajor:0 DeviceMinor:22 Capacity:16827064320 Type:vfs Inodes:4108170 HasInodes:true} {Device:/run DeviceMajor:0 DeviceMinor:24 Capacity:6730825728 Type:vfs Inodes:819200 HasInodes:true} {Device:/dev/vda4 DeviceMajor:252 DeviceMinor:4 Capacity:85292941312 Type:vfs Inodes:41679680 HasInodes:true} {Device:/tmp DeviceMajor:0 DeviceMinor:30 Capacity:16827064320 Type:vfs Inodes:1048576 HasInodes:true} {Device:/dev/vda3 DeviceMajor:252 DeviceMinor:3 
Capacity:366869504 Type:vfs Inodes:98304 HasInodes:true} {Device:/run/user/1000 DeviceMajor:0 DeviceMinor:42 Capacity:3365412864 Type:vfs Inodes:821634 HasInodes:true}] DiskMap:map[252:0:{Name:vda Major:252 Minor:0 Size:214748364800 Scheduler:none}] NetworkDevices:[{Name:br-ex MacAddress:fa:16:3e:4d:06:69 Speed:0 Mtu:1500} {Name:br-int MacAddress:d6:39:55:2e:22:71 Speed:0 Mtu:1400} {Name:ens3 MacAddress:fa:16:3e:4d:06:69 Speed:-1 Mtu:1500} {Name:ens7 MacAddress:fa:16:3e:2b:d7:bc Speed:-1 Mtu:1500} {Name:ens7.20 MacAddress:52:54:00:ae:f0:6e Speed:-1 Mtu:1496} {Name:ens7.21 MacAddress:52:54:00:bb:f3:5d Speed:-1 Mtu:1496} {Name:ens7.22 MacAddress:52:54:00:97:f5:df Speed:-1 Mtu:1496} {Name:eth10 MacAddress:f6:cf:ad:82:3c:cb Speed:0 Mtu:1500} {Name:ovn-k8s-mp0 MacAddress:0a:58:0a:d9:00:02 Speed:0 Mtu:1400} {Name:ovs-system MacAddress:2e:6f:b2:cb:63:a8 Speed:0 Mtu:1500}] Topology:[{Id:0 Memory:33654128640 HugePages:[{PageSize:1048576 NumPages:0} {PageSize:2048 NumPages:0}] Cores:[{Id:0 Threads:[0] Caches:[{Id:0 Size:32768 Type:Data Level:1} {Id:0 Size:32768 Type:Instruction Level:1} {Id:0 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:0 Size:16777216 Type:Unified Level:3}] SocketID:0 BookID: DrawerID:} {Id:0 Threads:[1] Caches:[{Id:1 Size:32768 Type:Data Level:1} {Id:1 Size:32768 Type:Instruction Level:1} {Id:1 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:1 Size:16777216 Type:Unified Level:3}] SocketID:1 BookID: DrawerID:} {Id:0 Threads:[10] Caches:[{Id:10 Size:32768 Type:Data Level:1} {Id:10 Size:32768 Type:Instruction Level:1} {Id:10 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:10 Size:16777216 Type:Unified Level:3}] SocketID:10 BookID: DrawerID:} {Id:0 Threads:[11] Caches:[{Id:11 Size:32768 Type:Data Level:1} {Id:11 Size:32768 Type:Instruction Level:1} {Id:11 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:11 Size:16777216 Type:Unified Level:3}] SocketID:11 BookID: DrawerID:} {Id:0 Threads:[2] Caches:[{Id:2 Size:32768 Type:Data Level:1} {Id:2 Size:32768 Type:Instruction Level:1} {Id:2 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:2 Size:16777216 Type:Unified Level:3}] SocketID:2 BookID: DrawerID:} {Id:0 Threads:[3] Caches:[{Id:3 Size:32768 Type:Data Level:1} {Id:3 Size:32768 Type:Instruction Level:1} {Id:3 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:3 Size:16777216 Type:Unified Level:3}] SocketID:3 BookID: DrawerID:} {Id:0 Threads:[4] Caches:[{Id:4 Size:32768 Type:Data Level:1} {Id:4 Size:32768 Type:Instruction Level:1} {Id:4 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:4 Size:16777216 Type:Unified Level:3}] SocketID:4 BookID: DrawerID:} {Id:0 Threads:[5] Caches:[{Id:5 Size:32768 Type:Data Level:1} {Id:5 Size:32768 Type:Instruction Level:1} {Id:5 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:5 Size:16777216 Type:Unified Level:3}] SocketID:5 BookID: DrawerID:} {Id:0 Threads:[6] Caches:[{Id:6 Size:32768 Type:Data Level:1} {Id:6 Size:32768 Type:Instruction Level:1} {Id:6 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:6 Size:16777216 Type:Unified Level:3}] SocketID:6 BookID: DrawerID:} {Id:0 Threads:[7] Caches:[{Id:7 Size:32768 Type:Data Level:1} {Id:7 Size:32768 Type:Instruction Level:1} {Id:7 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:7 Size:16777216 Type:Unified Level:3}] SocketID:7 BookID: DrawerID:} {Id:0 Threads:[8] Caches:[{Id:8 Size:32768 Type:Data Level:1} {Id:8 Size:32768 Type:Instruction Level:1} {Id:8 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:8 Size:16777216 Type:Unified Level:3}] SocketID:8 BookID: 
DrawerID:} {Id:0 Threads:[9] Caches:[{Id:9 Size:32768 Type:Data Level:1} {Id:9 Size:32768 Type:Instruction Level:1} {Id:9 Size:524288 Type:Unified Level:2}] UncoreCaches:[{Id:9 Size:16777216 Type:Unified Level:3}] SocketID:9 BookID: DrawerID:}] Caches:[] Distances:[10]}] CloudProvider:Unknown InstanceType:Unknown InstanceID:None} Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.422308 4875 manager_no_libpfm.go:29] cAdvisor is build without cgo and/or libpfm support. Perf event counters are not available. Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.422727 4875 manager.go:233] Version: {KernelVersion:5.14.0-427.50.2.el9_4.x86_64 ContainerOsVersion:Red Hat Enterprise Linux CoreOS 418.94.202502100215-0 DockerVersion: DockerAPIVersion: CadvisorVersion: CadvisorRevision:} Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.423900 4875 swap_util.go:113] "Swap is on" /proc/swaps contents="Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.424279 4875 container_manager_linux.go:267] "Container manager verified user specified cgroup-root exists" cgroupRoot=[] Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.424355 4875 container_manager_linux.go:272] "Creating Container Manager object based on Node Config" nodeConfig={"NodeName":"crc","RuntimeCgroupsName":"/system.slice/crio.service","SystemCgroupsName":"/system.slice","KubeletCgroupsName":"","KubeletOOMScoreAdj":-999,"ContainerRuntime":"","CgroupsPerQOS":true,"CgroupRoot":"/","CgroupDriver":"systemd","KubeletRootDir":"/var/lib/kubelet","ProtectKernelDefaults":true,"KubeReservedCgroupName":"","SystemReservedCgroupName":"","ReservedSystemCPUs":{},"EnforceNodeAllocatable":{"pods":{}},"KubeReserved":null,"SystemReserved":{"cpu":"200m","ephemeral-storage":"350Mi","memory":"350Mi"},"HardEvictionThresholds":[{"Signal":"imagefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"memory.available","Operator":"LessThan","Value":{"Quantity":"100Mi","Percentage":0},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.1},"GracePeriod":0,"MinReclaim":null},{"Signal":"nodefs.inodesFree","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.05},"GracePeriod":0,"MinReclaim":null},{"Signal":"imagefs.available","Operator":"LessThan","Value":{"Quantity":null,"Percentage":0.15},"GracePeriod":0,"MinReclaim":null}],"QOSReserved":{},"CPUManagerPolicy":"none","CPUManagerPolicyOptions":null,"TopologyManagerScope":"container","CPUManagerReconcilePeriod":10000000000,"ExperimentalMemoryManagerPolicy":"None","ExperimentalMemoryManagerReservedMemory":null,"PodPidsLimit":4096,"EnforceCPULimits":true,"CPUCFSQuotaPeriod":100000000,"TopologyManagerPolicy":"none","TopologyManagerPolicyOptions":null,"CgroupVersion":2} Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.424822 4875 topology_manager.go:138] "Creating topology manager with none policy" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.424846 4875 container_manager_linux.go:303] "Creating device plugin manager" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.425508 4875 manager.go:142] "Creating Device Plugin manager" path="/var/lib/kubelet/device-plugins/kubelet.sock" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.425565 4875 server.go:66] "Creating device plugin registration server" version="v1beta1" socket="/var/lib/kubelet/device-plugins/kubelet.sock" Nov 26 
00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.425951 4875 state_mem.go:36] "Initialized new in-memory state store" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.426653 4875 server.go:1245] "Using root directory" path="/var/lib/kubelet" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.430548 4875 kubelet.go:418] "Attempting to sync node with API server" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.430595 4875 kubelet.go:313] "Adding static pod path" path="/etc/kubernetes/manifests" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.430640 4875 file.go:69] "Watching path" path="/etc/kubernetes/manifests" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.430666 4875 kubelet.go:324] "Adding apiserver pod source" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.430689 4875 apiserver.go:42] "Waiting for node sync before watching apiserver pods" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.441882 4875 kuberuntime_manager.go:262] "Container runtime initialized" containerRuntime="cri-o" version="1.31.5-4.rhaos4.18.gitdad78d5.el9" apiVersion="v1" Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.443480 4875 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.129.56.238:6443: connect: connection refused Nov 26 00:07:07 crc kubenswrapper[4875]: E1126 00:07:07.443615 4875 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.129.56.238:6443: connect: connection refused" logger="UnhandledError" Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.443481 4875 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.129.56.238:6443: connect: connection refused Nov 26 00:07:07 crc kubenswrapper[4875]: E1126 00:07:07.443722 4875 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.129.56.238:6443: connect: connection refused" logger="UnhandledError" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.444593 4875 certificate_store.go:130] Loading cert/key pair from "/var/lib/kubelet/pki/kubelet-server-current.pem". 
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.446294 4875 kubelet.go:854] "Not starting ClusterTrustBundle informer because we are in static kubelet mode" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.448314 4875 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/portworx-volume" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.448367 4875 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/empty-dir" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.448383 4875 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/git-repo" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.448398 4875 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/host-path" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.448449 4875 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/nfs" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.448463 4875 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/secret" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.448478 4875 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/iscsi" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.448501 4875 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/downward-api" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.448518 4875 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/fc" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.448533 4875 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/configmap" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.448551 4875 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/projected" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.448567 4875 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/local-volume" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.449505 4875 plugins.go:603] "Loaded volume plugin" pluginName="kubernetes.io/csi" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.450381 4875 server.go:1280] "Started kubelet" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.451484 4875 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.129.56.238:6443: connect: connection refused Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.451825 4875 ratelimit.go:55] "Setting rate limiting for endpoint" service="podresources" qps=100 burstTokens=10 Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.451693 4875 server.go:163] "Starting to listen" address="0.0.0.0" port=10250 Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.452870 4875 server.go:236] "Starting to serve the podresources API" endpoint="unix:/var/lib/kubelet/pod-resources/kubelet.sock" Nov 26 00:07:07 crc systemd[1]: Started Kubernetes Kubelet. 
Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.454750 4875 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate rotation is enabled Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.454896 4875 fs_resource_analyzer.go:67] "Starting FS ResourceAnalyzer" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.455309 4875 volume_manager.go:287] "The desired_state_of_world populator starts" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.455373 4875 volume_manager.go:289] "Starting Kubelet Volume Manager" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.455821 4875 desired_state_of_world_populator.go:146] "Desired state populator starts to run" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.455395 4875 certificate_manager.go:356] kubernetes.io/kubelet-serving: Certificate expiration is 2026-02-24 05:53:03 +0000 UTC, rotation deadline is 2026-01-10 03:04:35.49770398 +0000 UTC Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.456617 4875 certificate_manager.go:356] kubernetes.io/kubelet-serving: Waiting 1082h57m28.041110825s for next certificate rotation Nov 26 00:07:07 crc kubenswrapper[4875]: E1126 00:07:07.456699 4875 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.456869 4875 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.129.56.238:6443: connect: connection refused Nov 26 00:07:07 crc kubenswrapper[4875]: E1126 00:07:07.457024 4875 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.129.56.238:6443: connect: connection refused" logger="UnhandledError" Nov 26 00:07:07 crc kubenswrapper[4875]: E1126 00:07:07.457307 4875 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.238:6443: connect: connection refused" interval="200ms" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.458750 4875 factory.go:55] Registering systemd factory Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.458912 4875 factory.go:221] Registration of the systemd container factory successfully Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.459804 4875 factory.go:153] Registering CRI-O factory Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.459997 4875 factory.go:221] Registration of the crio container factory successfully Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.460065 4875 server.go:460] "Adding debug handlers to kubelet server" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.462108 4875 factory.go:219] Registration of the containerd container factory failed: unable to create containerd client: containerd: cannot unix dial containerd api service: dial unix /run/containerd/containerd.sock: connect: no such file or directory Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.462182 4875 factory.go:103] Registering Raw factory Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.462254 4875 manager.go:1196] Started watching for new ooms in manager Nov 26 00:07:07 crc 
kubenswrapper[4875]: E1126 00:07:07.460249 4875 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/default/events\": dial tcp 38.129.56.238:6443: connect: connection refused" event="&Event{ObjectMeta:{crc.187b65d104e464b9 default 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Node,Namespace:,Name:crc,UID:crc,APIVersion:,ResourceVersion:,FieldPath:,},Reason:Starting,Message:Starting kubelet.,Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-26 00:07:07.450328249 +0000 UTC m=+0.640303974,LastTimestamp:2025-11-26 00:07:07.450328249 +0000 UTC m=+0.640303974,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.463655 4875 manager.go:319] Starting recovery of all containers Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.480485 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.480577 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.480605 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.480627 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.480650 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.480674 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.480695 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.480716 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" seLinuxMountContext="" Nov 26 00:07:07 
crc kubenswrapper[4875]: I1126 00:07:07.480741 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.480763 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.480785 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.480825 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.480858 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.480884 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.480908 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.480930 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.480953 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.480975 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.480994 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" seLinuxMountContext="" Nov 
26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.481016 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5b88f790-22fa-440e-b583-365168c0b23d" volumeName="kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.481038 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.481061 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.481082 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.481103 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.481126 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.481148 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.481176 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.481211 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.481233 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.481257 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" 
seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.481279 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.481299 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.481319 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.481340 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.481363 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.481384 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.481403 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.481557 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.481585 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.481611 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.481635 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" seLinuxMountContext="" Nov 26 
00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.481653 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.481681 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.481703 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.481727 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.481749 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.481772 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="efdd0498-1daa-4136-9a4a-3b948c2293fc" volumeName="kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.481801 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.481894 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1d611f23-29be-4491-8495-bee1670e935f" volumeName="kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.481918 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.481939 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3b6479f0-333b-4a96-9adf-2099afdc2447" volumeName="kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.481960 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" seLinuxMountContext="" Nov 26 00:07:07 crc 
kubenswrapper[4875]: I1126 00:07:07.482004 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.482030 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.482058 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.482087 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.482124 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49ef4625-1d3a-4a9f-b595-c2433d32326d" volumeName="kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.482150 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.482177 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.482202 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.482222 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.482285 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.482307 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 
00:07:07.482328 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.482347 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.482367 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.482388 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.482435 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.482458 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" volumeName="kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.482479 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.482500 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.482521 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.482544 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.482564 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.482585 4875 reconstruct.go:130] "Volume is marked as 
uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.482605 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.482626 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.482645 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" volumeName="kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.482667 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a31745f5-9847-4afe-82a5-3161cc66ca93" volumeName="kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.482687 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.482708 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.482730 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.482750 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.482771 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.482791 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d751cbb-f2e2-430d-9754-c882a5e924a5" volumeName="kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.482964 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the 
actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.482997 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.483020 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="25e176fe-21b4-4974-b1ed-c8b94f112a7f" volumeName="kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.483043 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.483068 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.483091 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.483113 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.483134 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.483153 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.483177 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.483198 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="37a5e44f-9a88-4405-be8a-b645485e7312" volumeName="kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.483222 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="44663579-783b-4372-86d6-acf235a62d72" 
volumeName="kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.483243 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.483265 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.483288 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.483375 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.483397 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.483450 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.483484 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.483531 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.483562 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.483588 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.483614 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" 
volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.483637 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.483662 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.483687 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.483715 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.483746 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.483772 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="87cf06ed-a83f-41a7-828d-70653580a8cb" volumeName="kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.483860 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.483885 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.483910 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6312bbd-5731-4ea0-a20f-81d5a57df44a" volumeName="kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.483939 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.483963 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" 
volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.483984 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.484010 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.484034 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" volumeName="kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.484061 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="20b0d48f-5fd6-431c-a545-e3c800c7b866" volumeName="kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.484088 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3ab1a177-2de0-46d9-b765-d0d0649bb42e" volumeName="kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.484116 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.484138 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.484159 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.484180 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.484207 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" volumeName="kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.484227 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" 
volumeName="kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.484250 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" volumeName="kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.484269 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.484290 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.484317 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.484348 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.484372 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.484397 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7bb08738-c794-4ee8-9972-3a62ca171029" volumeName="kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.484527 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.484551 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.484571 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.484591 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" 
volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.484619 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6731426b-95fe-49ff-bb5f-40441049fde2" volumeName="kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.484652 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.484674 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.484806 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.484829 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.484854 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.484876 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.484898 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.484919 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="d75a4c96-2883-4a0b-bab2-0fab2b6c0b49" volumeName="kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.484946 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.484975 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09efc573-dbb6-4249-bd59-9b87aba8dd28" 
volumeName="kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.485003 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.485025 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.485047 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.487222 4875 reconstruct.go:144] "Volume is marked device as uncertain and added into the actual state" volumeName="kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" deviceMountPath="/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.487271 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.487295 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.487315 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" volumeName="kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.487336 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5441d097-087c-4d9a-baa8-b210afa90fc9" volumeName="kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.487360 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.487380 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" volumeName="kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.487401 4875 reconstruct.go:130] "Volume 
is marked as uncertain and added into the actual state" pod="" podName="5fe579f8-e8a6-4643-bce5-a661393c4dde" volumeName="kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.487456 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.487479 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.487499 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.487525 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="43509403-f426-496e-be36-56cef71462f5" volumeName="kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.487543 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="4bb40260-dbaa-4fb0-84df-5e680505d512" volumeName="kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.487565 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="31d8b7a1-420e-4252-a5b7-eebe8a111292" volumeName="kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.487585 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.487605 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" volumeName="kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.487628 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.487661 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b78653f-4ff9-4508-8672-245ed9b561e3" volumeName="kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.487681 4875 reconstruct.go:130] "Volume is marked as uncertain 
and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.487703 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.487724 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.487744 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.487763 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6402fda4-df10-493c-b4e5-d0569419652d" volumeName="kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.487784 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.487806 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7539238d-5fe0-46ed-884e-1c3b566537ec" volumeName="kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.487827 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" volumeName="kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.487847 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="22c825df-677d-4ca6-82db-3454ed06e783" volumeName="kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.487867 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" volumeName="kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.487886 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.487908 4875 reconstruct.go:130] "Volume is marked as uncertain and added 
into the actual state" pod="" podName="6ea678ab-3438-413e-bfe3-290ae7725660" volumeName="kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.487929 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="7583ce53-e0fe-4a16-9e4d-50516596a136" volumeName="kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.487952 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="9d4552c7-cd75-42dd-8880-30dd377c49a4" volumeName="kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.487972 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="e7e6199b-1264-4501-8953-767f51328d08" volumeName="kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.487995 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" volumeName="kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.488017 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1bf7eb37-55a3-4c65-b768-a94c82151e69" volumeName="kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.488063 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.488110 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="57a731c4-ef35-47a8-b875-bfb08a7f8011" volumeName="kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.488144 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="5225d0e4-402f-4861-b410-819f433b1803" volumeName="kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.488173 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="925f1c65-6136-48ba-85aa-3a3b50560753" volumeName="kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.488232 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" volumeName="kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.488258 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" 
podName="c03ee662-fb2f-4fc4-a2c1-af487c19d254" volumeName="kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.488281 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="01ab3dd5-8196-46d0-ad33-122e2ca51def" volumeName="kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.488302 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="0b574797-001e-440a-8f4e-c0be86edad0f" volumeName="kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.488335 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" volumeName="kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.488357 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="496e6271-fb68-4057-954e-a0d97a4afa3f" volumeName="kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.488382 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="bf126b07-da06-4140-9a57-dfd54fc6b486" volumeName="kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.488401 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="ef543e1b-8068-4ea3-b32a-61027b32e95d" volumeName="kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.488458 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" volumeName="kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.488479 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="fda69060-fa79-4696-b1a6-7980f124bf7c" volumeName="kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.488499 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="1386a44e-36a2-460c-96d0-0359d2b6f0f5" volumeName="kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.488521 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="6509e943-70c6-444c-bc41-48a544e36fbd" volumeName="kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.488544 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="8f668bae-612b-4b75-9490-919e737c6a3b" 
volumeName="kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.488569 4875 reconstruct.go:130] "Volume is marked as uncertain and added into the actual state" pod="" podName="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" volumeName="kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" seLinuxMountContext="" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.488590 4875 reconstruct.go:97] "Volume reconstruction finished" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.488605 4875 reconciler.go:26] "Reconciler: start to sync state" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.506287 4875 manager.go:324] Recovery completed Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.521469 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.523704 4875 kubelet_network_linux.go:50] "Initialized iptables rules." protocol="IPv4" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.524608 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.524756 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.524847 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.526096 4875 cpu_manager.go:225] "Starting CPU manager" policy="none" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.526144 4875 cpu_manager.go:226] "Reconciling" reconcilePeriod="10s" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.526182 4875 state_mem.go:36] "Initialized new in-memory state store" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.527620 4875 kubelet_network_linux.go:50] "Initialized iptables rules." 
protocol="IPv6" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.527698 4875 status_manager.go:217] "Starting to sync pod status with apiserver" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.527750 4875 kubelet.go:2335] "Starting kubelet main sync loop" Nov 26 00:07:07 crc kubenswrapper[4875]: E1126 00:07:07.527862 4875 kubelet.go:2359] "Skipping pod synchronization" err="[container runtime status check may not have completed yet, PLEG is not healthy: pleg has yet to be successful]" Nov 26 00:07:07 crc kubenswrapper[4875]: W1126 00:07:07.529518 4875 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.129.56.238:6443: connect: connection refused Nov 26 00:07:07 crc kubenswrapper[4875]: E1126 00:07:07.529713 4875 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.129.56.238:6443: connect: connection refused" logger="UnhandledError" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.544314 4875 policy_none.go:49] "None policy: Start" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.555974 4875 memory_manager.go:170] "Starting memorymanager" policy="None" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.556072 4875 state_mem.go:35] "Initializing new in-memory state store" Nov 26 00:07:07 crc kubenswrapper[4875]: E1126 00:07:07.557674 4875 kubelet_node_status.go:503] "Error getting the current node from lister" err="node \"crc\" not found" Nov 26 00:07:07 crc kubenswrapper[4875]: E1126 00:07:07.628041 4875 kubelet.go:2359] "Skipping pod synchronization" err="container runtime status check may not have completed yet" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.650007 4875 manager.go:334] "Starting Device Plugin manager" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.650146 4875 manager.go:513] "Failed to read data from checkpoint" checkpoint="kubelet_internal_checkpoint" err="checkpoint is not found" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.650170 4875 server.go:79] "Starting device plugin registration server" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.651089 4875 eviction_manager.go:189] "Eviction manager: starting control loop" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.651835 4875 container_log_manager.go:189] "Initializing container log rotate workers" workers=1 monitorPeriod="10s" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.652044 4875 plugin_watcher.go:51] "Plugin Watcher Start" path="/var/lib/kubelet/plugins_registry" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.652199 4875 plugin_manager.go:116] "The desired_state_of_world populator (plugin watcher) starts" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.652225 4875 plugin_manager.go:118] "Starting Kubelet Plugin Manager" Nov 26 00:07:07 crc kubenswrapper[4875]: E1126 00:07:07.658406 4875 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.238:6443: connect: connection refused" interval="400ms" Nov 26 00:07:07 crc kubenswrapper[4875]: E1126 00:07:07.664885 4875 eviction_manager.go:285] 
"Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.753084 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.754694 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.754740 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.754751 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.754786 4875 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 26 00:07:07 crc kubenswrapper[4875]: E1126 00:07:07.755547 4875 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.129.56.238:6443: connect: connection refused" node="crc" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.828970 4875 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc","openshift-kube-controller-manager/kube-controller-manager-crc","openshift-kube-scheduler/openshift-kube-scheduler-crc","openshift-machine-config-operator/kube-rbac-proxy-crio-crc","openshift-etcd/etcd-crc"] Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.829244 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.831487 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.831542 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.831560 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.831784 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.832273 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.832485 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.833526 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.833613 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.833644 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.833928 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.834452 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.834602 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.834715 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.834808 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.834834 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.837927 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.838040 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.838071 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.838493 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.839700 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.839796 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.840751 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.840805 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.840822 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.841575 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.841624 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.841638 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.841889 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.841906 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.841919 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.842085 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:07 crc 
kubenswrapper[4875]: I1126 00:07:07.843092 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.843136 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.845488 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.845549 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.845569 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.845504 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.845673 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.845694 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.846013 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.846083 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.847598 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.847814 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.848279 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.897904 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.898250 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.898663 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.898918 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.899109 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.899322 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.899555 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.899771 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.899984 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.900193 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.900510 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.900745 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.900935 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod 
\"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.901113 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.901181 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.956697 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.958804 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.958866 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.958880 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:07 crc kubenswrapper[4875]: I1126 00:07:07.958921 4875 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 26 00:07:07 crc kubenswrapper[4875]: E1126 00:07:07.959726 4875 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.129.56.238:6443: connect: connection refused" node="crc" Nov 26 00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.002988 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.003049 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.003100 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.003172 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.003206 4875 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.003236 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.003268 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.003264 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-cert-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.003320 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.003368 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.003368 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"static-pod-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-static-pod-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.003463 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.003483 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-cert-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.003238 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-log-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 
00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.003497 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.003545 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.003575 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kube\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-etc-kube\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.003625 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.003628 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.003676 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/d1b160f5dda77d281dd8e69ec8d817f9-var-lib-kubelet\") pod \"kube-rbac-proxy-crio-crc\" (UID: \"d1b160f5dda77d281dd8e69ec8d817f9\") " pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.003680 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.003682 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/3dcd261975c3d6b9a6ad6367fd4facd3-resource-dir\") pod \"openshift-kube-scheduler-crc\" (UID: \"3dcd261975c3d6b9a6ad6367fd4facd3\") " pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.003596 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"usr-local-bin\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-usr-local-bin\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.003764 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-resource-dir\") pod \"etcd-crc\" (UID: 
\"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.003918 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-cert-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.003923 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.003957 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.003951 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"data-dir\" (UniqueName: \"kubernetes.io/host-path/2139d3e2895fc6797b9c76a1b4c9886d-data-dir\") pod \"etcd-crc\" (UID: \"2139d3e2895fc6797b9c76a1b4c9886d\") " pod="openshift-etcd/etcd-crc" Nov 26 00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.004006 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.004117 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f614b9022728cf315e60c057852e563e-resource-dir\") pod \"kube-controller-manager-crc\" (UID: \"f614b9022728cf315e60c057852e563e\") " pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 00:07:08 crc kubenswrapper[4875]: E1126 00:07:08.060306 4875 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.238:6443: connect: connection refused" interval="800ms" Nov 26 00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.175573 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.193358 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.208839 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.233998 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" Nov 26 00:07:08 crc kubenswrapper[4875]: W1126 00:07:08.240956 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf4b27818a5e8e43d0dc095d08835c792.slice/crio-c4fb3cdebba450e59263e6d2967169aede6fadbf21501b376236f4db3ff287b9 WatchSource:0}: Error finding container c4fb3cdebba450e59263e6d2967169aede6fadbf21501b376236f4db3ff287b9: Status 404 returned error can't find the container with id c4fb3cdebba450e59263e6d2967169aede6fadbf21501b376236f4db3ff287b9 Nov 26 00:07:08 crc kubenswrapper[4875]: W1126 00:07:08.242247 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf614b9022728cf315e60c057852e563e.slice/crio-acbbff69c8801b520bacefa0f7fecd45d7aeb9298aa3d19dde6e1e9f6fd344a9 WatchSource:0}: Error finding container acbbff69c8801b520bacefa0f7fecd45d7aeb9298aa3d19dde6e1e9f6fd344a9: Status 404 returned error can't find the container with id acbbff69c8801b520bacefa0f7fecd45d7aeb9298aa3d19dde6e1e9f6fd344a9 Nov 26 00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.245570 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd/etcd-crc" Nov 26 00:07:08 crc kubenswrapper[4875]: W1126 00:07:08.249459 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3dcd261975c3d6b9a6ad6367fd4facd3.slice/crio-00fe3cc1ad8f20f8e0433f3f4c93dff1f3e5603f22ea4c3c0cb375b3dc27aac7 WatchSource:0}: Error finding container 00fe3cc1ad8f20f8e0433f3f4c93dff1f3e5603f22ea4c3c0cb375b3dc27aac7: Status 404 returned error can't find the container with id 00fe3cc1ad8f20f8e0433f3f4c93dff1f3e5603f22ea4c3c0cb375b3dc27aac7 Nov 26 00:07:08 crc kubenswrapper[4875]: W1126 00:07:08.262080 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd1b160f5dda77d281dd8e69ec8d817f9.slice/crio-2e37f0da505666295bb6e8391bb6ace02401ba7f0f99074ef71bb5894f794f3c WatchSource:0}: Error finding container 2e37f0da505666295bb6e8391bb6ace02401ba7f0f99074ef71bb5894f794f3c: Status 404 returned error can't find the container with id 2e37f0da505666295bb6e8391bb6ace02401ba7f0f99074ef71bb5894f794f3c Nov 26 00:07:08 crc kubenswrapper[4875]: W1126 00:07:08.269696 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2139d3e2895fc6797b9c76a1b4c9886d.slice/crio-b713695622586598c7b5903fe27086ca4b72a7a2e36daece962586c454468c6f WatchSource:0}: Error finding container b713695622586598c7b5903fe27086ca4b72a7a2e36daece962586c454468c6f: Status 404 returned error can't find the container with id b713695622586598c7b5903fe27086ca4b72a7a2e36daece962586c454468c6f Nov 26 00:07:08 crc kubenswrapper[4875]: W1126 00:07:08.351389 4875 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.129.56.238:6443: connect: connection refused Nov 26 00:07:08 crc kubenswrapper[4875]: E1126 00:07:08.351543 4875 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get 
\"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.129.56.238:6443: connect: connection refused" logger="UnhandledError" Nov 26 00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.360145 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.362226 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.362266 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.362278 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.362308 4875 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 26 00:07:08 crc kubenswrapper[4875]: E1126 00:07:08.362655 4875 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.129.56.238:6443: connect: connection refused" node="crc" Nov 26 00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.452346 4875 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.129.56.238:6443: connect: connection refused Nov 26 00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.535921 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"00fe3cc1ad8f20f8e0433f3f4c93dff1f3e5603f22ea4c3c0cb375b3dc27aac7"} Nov 26 00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.537359 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"acbbff69c8801b520bacefa0f7fecd45d7aeb9298aa3d19dde6e1e9f6fd344a9"} Nov 26 00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.538967 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"c4fb3cdebba450e59263e6d2967169aede6fadbf21501b376236f4db3ff287b9"} Nov 26 00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.540239 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"b713695622586598c7b5903fe27086ca4b72a7a2e36daece962586c454468c6f"} Nov 26 00:07:08 crc kubenswrapper[4875]: I1126 00:07:08.541697 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"2e37f0da505666295bb6e8391bb6ace02401ba7f0f99074ef71bb5894f794f3c"} Nov 26 00:07:08 crc kubenswrapper[4875]: W1126 00:07:08.803365 4875 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.129.56.238:6443: connect: connection refused Nov 26 00:07:08 crc kubenswrapper[4875]: E1126 
00:07:08.803472 4875 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.129.56.238:6443: connect: connection refused" logger="UnhandledError" Nov 26 00:07:08 crc kubenswrapper[4875]: E1126 00:07:08.861518 4875 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.238:6443: connect: connection refused" interval="1.6s" Nov 26 00:07:08 crc kubenswrapper[4875]: W1126 00:07:08.904556 4875 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.129.56.238:6443: connect: connection refused Nov 26 00:07:08 crc kubenswrapper[4875]: E1126 00:07:08.904683 4875 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.129.56.238:6443: connect: connection refused" logger="UnhandledError" Nov 26 00:07:08 crc kubenswrapper[4875]: W1126 00:07:08.958918 4875 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.129.56.238:6443: connect: connection refused Nov 26 00:07:08 crc kubenswrapper[4875]: E1126 00:07:08.959028 4875 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.129.56.238:6443: connect: connection refused" logger="UnhandledError" Nov 26 00:07:09 crc kubenswrapper[4875]: I1126 00:07:09.162933 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:09 crc kubenswrapper[4875]: I1126 00:07:09.164640 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:09 crc kubenswrapper[4875]: I1126 00:07:09.164702 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:09 crc kubenswrapper[4875]: I1126 00:07:09.164716 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:09 crc kubenswrapper[4875]: I1126 00:07:09.164758 4875 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 26 00:07:09 crc kubenswrapper[4875]: E1126 00:07:09.165406 4875 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.129.56.238:6443: connect: connection refused" node="crc" Nov 26 00:07:09 crc kubenswrapper[4875]: I1126 00:07:09.454276 4875 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.129.56.238:6443: connect: connection 
refused Nov 26 00:07:09 crc kubenswrapper[4875]: I1126 00:07:09.546791 4875 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49" exitCode=0 Nov 26 00:07:09 crc kubenswrapper[4875]: I1126 00:07:09.546887 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49"} Nov 26 00:07:09 crc kubenswrapper[4875]: I1126 00:07:09.546926 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:09 crc kubenswrapper[4875]: I1126 00:07:09.547979 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:09 crc kubenswrapper[4875]: I1126 00:07:09.548023 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:09 crc kubenswrapper[4875]: I1126 00:07:09.548038 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:09 crc kubenswrapper[4875]: I1126 00:07:09.549815 4875 generic.go:334] "Generic (PLEG): container finished" podID="d1b160f5dda77d281dd8e69ec8d817f9" containerID="79de50f8565ebaa0603152ebfb03022b2bd21faba4ad3fb999735de1a2249a1b" exitCode=0 Nov 26 00:07:09 crc kubenswrapper[4875]: I1126 00:07:09.549889 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerDied","Data":"79de50f8565ebaa0603152ebfb03022b2bd21faba4ad3fb999735de1a2249a1b"} Nov 26 00:07:09 crc kubenswrapper[4875]: I1126 00:07:09.549935 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:09 crc kubenswrapper[4875]: I1126 00:07:09.551274 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:09 crc kubenswrapper[4875]: I1126 00:07:09.551297 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:09 crc kubenswrapper[4875]: I1126 00:07:09.551306 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:09 crc kubenswrapper[4875]: I1126 00:07:09.551899 4875 generic.go:334] "Generic (PLEG): container finished" podID="3dcd261975c3d6b9a6ad6367fd4facd3" containerID="34b0e5244d5a03f2b673959e7a5d9ec63a0d3a77dc9915cc1e1e0d73cdcfabee" exitCode=0 Nov 26 00:07:09 crc kubenswrapper[4875]: I1126 00:07:09.551957 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:09 crc kubenswrapper[4875]: I1126 00:07:09.551990 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerDied","Data":"34b0e5244d5a03f2b673959e7a5d9ec63a0d3a77dc9915cc1e1e0d73cdcfabee"} Nov 26 00:07:09 crc kubenswrapper[4875]: I1126 00:07:09.552909 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:09 crc kubenswrapper[4875]: I1126 00:07:09.552936 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 00:07:09 crc kubenswrapper[4875]: I1126 00:07:09.552945 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:09 crc kubenswrapper[4875]: I1126 00:07:09.555243 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"9641d3d437fd7125bbf21d140d2e6853aed997e53f3f6be8133ce94c0d4a4653"} Nov 26 00:07:09 crc kubenswrapper[4875]: I1126 00:07:09.555301 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"08b34767b95b984666c42ed51106c7d43766e36ce3f5f4e743d8a40451536f82"} Nov 26 00:07:09 crc kubenswrapper[4875]: I1126 00:07:09.555322 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"0dc301396f467d1c7d0af549fa7bf7daf1542a2aa87bf3ece820ce8e21a933d5"} Nov 26 00:07:09 crc kubenswrapper[4875]: I1126 00:07:09.556696 4875 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654" exitCode=0 Nov 26 00:07:09 crc kubenswrapper[4875]: I1126 00:07:09.556747 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654"} Nov 26 00:07:09 crc kubenswrapper[4875]: I1126 00:07:09.556837 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:09 crc kubenswrapper[4875]: I1126 00:07:09.558068 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:09 crc kubenswrapper[4875]: I1126 00:07:09.558127 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:09 crc kubenswrapper[4875]: I1126 00:07:09.558137 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:09 crc kubenswrapper[4875]: I1126 00:07:09.563777 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:09 crc kubenswrapper[4875]: I1126 00:07:09.564965 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:09 crc kubenswrapper[4875]: I1126 00:07:09.565016 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:09 crc kubenswrapper[4875]: I1126 00:07:09.565034 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:10 crc kubenswrapper[4875]: I1126 00:07:10.453438 4875 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.129.56.238:6443: connect: connection refused Nov 26 00:07:10 crc kubenswrapper[4875]: E1126 00:07:10.462164 4875 controller.go:145] "Failed to ensure lease exists, 
will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.238:6443: connect: connection refused" interval="3.2s" Nov 26 00:07:10 crc kubenswrapper[4875]: W1126 00:07:10.496744 4875 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.CSIDriver: Get "https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0": dial tcp 38.129.56.238:6443: connect: connection refused Nov 26 00:07:10 crc kubenswrapper[4875]: E1126 00:07:10.496847 4875 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.CSIDriver: failed to list *v1.CSIDriver: Get \"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csidrivers?limit=500&resourceVersion=0\": dial tcp 38.129.56.238:6443: connect: connection refused" logger="UnhandledError" Nov 26 00:07:10 crc kubenswrapper[4875]: W1126 00:07:10.532844 4875 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.RuntimeClass: Get "https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0": dial tcp 38.129.56.238:6443: connect: connection refused Nov 26 00:07:10 crc kubenswrapper[4875]: E1126 00:07:10.532949 4875 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.RuntimeClass: failed to list *v1.RuntimeClass: Get \"https://api-int.crc.testing:6443/apis/node.k8s.io/v1/runtimeclasses?limit=500&resourceVersion=0\": dial tcp 38.129.56.238:6443: connect: connection refused" logger="UnhandledError" Nov 26 00:07:10 crc kubenswrapper[4875]: I1126 00:07:10.561919 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"11193f8290474fdb88f54086c6e42c01f4630df442b28819c4917165ef46a5cb"} Nov 26 00:07:10 crc kubenswrapper[4875]: I1126 00:07:10.561980 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"fffb96f456bc7cb372dceab2ad7310da04e2487d43530da595c2b865ec8e02a3"} Nov 26 00:07:10 crc kubenswrapper[4875]: I1126 00:07:10.562003 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" event={"ID":"3dcd261975c3d6b9a6ad6367fd4facd3","Type":"ContainerStarted","Data":"ec22a9fda336e3f85bcbaf3c8fee2d1096b9f9eaede26d1246858bf393ad5b55"} Nov 26 00:07:10 crc kubenswrapper[4875]: I1126 00:07:10.562119 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:10 crc kubenswrapper[4875]: I1126 00:07:10.563341 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:10 crc kubenswrapper[4875]: I1126 00:07:10.563372 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:10 crc kubenswrapper[4875]: I1126 00:07:10.563380 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:10 crc kubenswrapper[4875]: I1126 00:07:10.566545 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" 
event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"5086422d5107d7bba0da4b6f9b3e75966b3ee5833d93e8f5baa87e59f99e4578"} Nov 26 00:07:10 crc kubenswrapper[4875]: I1126 00:07:10.566653 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:10 crc kubenswrapper[4875]: I1126 00:07:10.567716 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:10 crc kubenswrapper[4875]: I1126 00:07:10.567736 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:10 crc kubenswrapper[4875]: I1126 00:07:10.567746 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:10 crc kubenswrapper[4875]: I1126 00:07:10.572467 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec"} Nov 26 00:07:10 crc kubenswrapper[4875]: I1126 00:07:10.572502 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7"} Nov 26 00:07:10 crc kubenswrapper[4875]: I1126 00:07:10.572518 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0"} Nov 26 00:07:10 crc kubenswrapper[4875]: I1126 00:07:10.572526 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2"} Nov 26 00:07:10 crc kubenswrapper[4875]: I1126 00:07:10.574427 4875 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6" exitCode=0 Nov 26 00:07:10 crc kubenswrapper[4875]: I1126 00:07:10.574478 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6"} Nov 26 00:07:10 crc kubenswrapper[4875]: I1126 00:07:10.574599 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:10 crc kubenswrapper[4875]: I1126 00:07:10.576109 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:10 crc kubenswrapper[4875]: I1126 00:07:10.576132 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:10 crc kubenswrapper[4875]: I1126 00:07:10.576142 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:10 crc kubenswrapper[4875]: I1126 00:07:10.579185 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" 
event={"ID":"d1b160f5dda77d281dd8e69ec8d817f9","Type":"ContainerStarted","Data":"dd927fcd02e9f4b19ca442007a070dbc4ec8b26fe5e3dbd6e65f171616aebddc"} Nov 26 00:07:10 crc kubenswrapper[4875]: I1126 00:07:10.579302 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:10 crc kubenswrapper[4875]: I1126 00:07:10.580354 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:10 crc kubenswrapper[4875]: I1126 00:07:10.580386 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:10 crc kubenswrapper[4875]: I1126 00:07:10.580399 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:10 crc kubenswrapper[4875]: W1126 00:07:10.582756 4875 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Service: Get "https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0": dial tcp 38.129.56.238:6443: connect: connection refused Nov 26 00:07:10 crc kubenswrapper[4875]: E1126 00:07:10.582843 4875 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Service: failed to list *v1.Service: Get \"https://api-int.crc.testing:6443/api/v1/services?fieldSelector=spec.clusterIP%21%3DNone&limit=500&resourceVersion=0\": dial tcp 38.129.56.238:6443: connect: connection refused" logger="UnhandledError" Nov 26 00:07:10 crc kubenswrapper[4875]: I1126 00:07:10.765771 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:10 crc kubenswrapper[4875]: I1126 00:07:10.767491 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:10 crc kubenswrapper[4875]: I1126 00:07:10.767550 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:10 crc kubenswrapper[4875]: I1126 00:07:10.767566 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:10 crc kubenswrapper[4875]: I1126 00:07:10.767604 4875 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 26 00:07:10 crc kubenswrapper[4875]: E1126 00:07:10.768257 4875 kubelet_node_status.go:99] "Unable to register node with API server" err="Post \"https://api-int.crc.testing:6443/api/v1/nodes\": dial tcp 38.129.56.238:6443: connect: connection refused" node="crc" Nov 26 00:07:10 crc kubenswrapper[4875]: W1126 00:07:10.892124 4875 reflector.go:561] k8s.io/client-go/informers/factory.go:160: failed to list *v1.Node: Get "https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0": dial tcp 38.129.56.238:6443: connect: connection refused Nov 26 00:07:10 crc kubenswrapper[4875]: E1126 00:07:10.892236 4875 reflector.go:158] "Unhandled Error" err="k8s.io/client-go/informers/factory.go:160: Failed to watch *v1.Node: failed to list *v1.Node: Get \"https://api-int.crc.testing:6443/api/v1/nodes?fieldSelector=metadata.name%3Dcrc&limit=500&resourceVersion=0\": dial tcp 38.129.56.238:6443: connect: connection refused" logger="UnhandledError" Nov 26 00:07:11 crc kubenswrapper[4875]: I1126 00:07:11.452004 4875 csi_plugin.go:884] Failed to contact API server when waiting for CSINode publishing: Get 
"https://api-int.crc.testing:6443/apis/storage.k8s.io/v1/csinodes/crc?resourceVersion=0": dial tcp 38.129.56.238:6443: connect: connection refused Nov 26 00:07:11 crc kubenswrapper[4875]: I1126 00:07:11.584101 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738"} Nov 26 00:07:11 crc kubenswrapper[4875]: I1126 00:07:11.584151 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:11 crc kubenswrapper[4875]: I1126 00:07:11.584899 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:11 crc kubenswrapper[4875]: I1126 00:07:11.584922 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:11 crc kubenswrapper[4875]: I1126 00:07:11.584930 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:11 crc kubenswrapper[4875]: I1126 00:07:11.586149 4875 generic.go:334] "Generic (PLEG): container finished" podID="2139d3e2895fc6797b9c76a1b4c9886d" containerID="c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f" exitCode=0 Nov 26 00:07:11 crc kubenswrapper[4875]: I1126 00:07:11.586229 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:11 crc kubenswrapper[4875]: I1126 00:07:11.586667 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:11 crc kubenswrapper[4875]: I1126 00:07:11.586917 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerDied","Data":"c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f"} Nov 26 00:07:11 crc kubenswrapper[4875]: I1126 00:07:11.586992 4875 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 00:07:11 crc kubenswrapper[4875]: I1126 00:07:11.587012 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:11 crc kubenswrapper[4875]: I1126 00:07:11.587576 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:11 crc kubenswrapper[4875]: I1126 00:07:11.595357 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:11 crc kubenswrapper[4875]: I1126 00:07:11.595421 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:11 crc kubenswrapper[4875]: I1126 00:07:11.595433 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:11 crc kubenswrapper[4875]: I1126 00:07:11.595446 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:11 crc kubenswrapper[4875]: I1126 00:07:11.595486 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:11 crc kubenswrapper[4875]: I1126 00:07:11.595497 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:11 crc 
kubenswrapper[4875]: I1126 00:07:11.596193 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:11 crc kubenswrapper[4875]: I1126 00:07:11.596215 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:11 crc kubenswrapper[4875]: I1126 00:07:11.596226 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:11 crc kubenswrapper[4875]: I1126 00:07:11.596246 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:11 crc kubenswrapper[4875]: I1126 00:07:11.596283 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:11 crc kubenswrapper[4875]: I1126 00:07:11.596294 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:12 crc kubenswrapper[4875]: I1126 00:07:12.594225 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"c7650f30df5f9f908d3dc068e88e04892dce7a88f63ecc6a52901a36ba7bed5f"} Nov 26 00:07:12 crc kubenswrapper[4875]: I1126 00:07:12.594288 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"7ef02df42efda33d3f123cfe925bb0d1adab00fecbfd8711962c4bdf458898e1"} Nov 26 00:07:12 crc kubenswrapper[4875]: I1126 00:07:12.594304 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"7f839a108e04ab5f68757b2f5661fe0643d88bbd33493cc800d9c6826d0e69c4"} Nov 26 00:07:12 crc kubenswrapper[4875]: I1126 00:07:12.594314 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:12 crc kubenswrapper[4875]: I1126 00:07:12.594318 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"2a0c30f49af86d8affd5039728fe2adf22324b93d617dd5c63dfc73aed0ed70e"} Nov 26 00:07:12 crc kubenswrapper[4875]: I1126 00:07:12.594503 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd/etcd-crc" event={"ID":"2139d3e2895fc6797b9c76a1b4c9886d","Type":"ContainerStarted","Data":"aac38d8e30fa7e53d316dd4092f7210c7976bd44f77d2a21ab0ed53e1f6c69f3"} Nov 26 00:07:12 crc kubenswrapper[4875]: I1126 00:07:12.594288 4875 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 00:07:12 crc kubenswrapper[4875]: I1126 00:07:12.594580 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:12 crc kubenswrapper[4875]: I1126 00:07:12.595670 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:12 crc kubenswrapper[4875]: I1126 00:07:12.595718 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:12 crc kubenswrapper[4875]: I1126 00:07:12.595670 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:12 crc kubenswrapper[4875]: I1126 00:07:12.595773 4875 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:12 crc kubenswrapper[4875]: I1126 00:07:12.595789 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:12 crc kubenswrapper[4875]: I1126 00:07:12.595737 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:13 crc kubenswrapper[4875]: I1126 00:07:13.236158 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 00:07:13 crc kubenswrapper[4875]: I1126 00:07:13.597094 4875 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 00:07:13 crc kubenswrapper[4875]: I1126 00:07:13.597173 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:13 crc kubenswrapper[4875]: I1126 00:07:13.597251 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:13 crc kubenswrapper[4875]: I1126 00:07:13.598647 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:13 crc kubenswrapper[4875]: I1126 00:07:13.598684 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:13 crc kubenswrapper[4875]: I1126 00:07:13.598693 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:13 crc kubenswrapper[4875]: I1126 00:07:13.599311 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:13 crc kubenswrapper[4875]: I1126 00:07:13.599372 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:13 crc kubenswrapper[4875]: I1126 00:07:13.599395 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:13 crc kubenswrapper[4875]: I1126 00:07:13.693592 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 00:07:13 crc kubenswrapper[4875]: I1126 00:07:13.693792 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:13 crc kubenswrapper[4875]: I1126 00:07:13.695477 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:13 crc kubenswrapper[4875]: I1126 00:07:13.695531 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:13 crc kubenswrapper[4875]: I1126 00:07:13.695545 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:13 crc kubenswrapper[4875]: I1126 00:07:13.783384 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 00:07:13 crc kubenswrapper[4875]: I1126 00:07:13.783659 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:13 crc kubenswrapper[4875]: I1126 00:07:13.785396 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 
26 00:07:13 crc kubenswrapper[4875]: I1126 00:07:13.785455 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:13 crc kubenswrapper[4875]: I1126 00:07:13.785464 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:13 crc kubenswrapper[4875]: I1126 00:07:13.969301 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:13 crc kubenswrapper[4875]: I1126 00:07:13.970676 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:13 crc kubenswrapper[4875]: I1126 00:07:13.970717 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:13 crc kubenswrapper[4875]: I1126 00:07:13.970728 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:13 crc kubenswrapper[4875]: I1126 00:07:13.970756 4875 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 26 00:07:15 crc kubenswrapper[4875]: I1126 00:07:15.653992 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 00:07:15 crc kubenswrapper[4875]: I1126 00:07:15.654246 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:15 crc kubenswrapper[4875]: I1126 00:07:15.655856 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:15 crc kubenswrapper[4875]: I1126 00:07:15.655912 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:15 crc kubenswrapper[4875]: I1126 00:07:15.655933 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:15 crc kubenswrapper[4875]: I1126 00:07:15.913908 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 00:07:15 crc kubenswrapper[4875]: I1126 00:07:15.914102 4875 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 00:07:15 crc kubenswrapper[4875]: I1126 00:07:15.914149 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:15 crc kubenswrapper[4875]: I1126 00:07:15.915430 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:15 crc kubenswrapper[4875]: I1126 00:07:15.915463 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:15 crc kubenswrapper[4875]: I1126 00:07:15.915474 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:15 crc kubenswrapper[4875]: I1126 00:07:15.980450 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-etcd/etcd-crc" Nov 26 00:07:15 crc kubenswrapper[4875]: I1126 00:07:15.980722 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:15 crc kubenswrapper[4875]: I1126 00:07:15.982025 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 
00:07:15 crc kubenswrapper[4875]: I1126 00:07:15.982076 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:15 crc kubenswrapper[4875]: I1126 00:07:15.982091 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:17 crc kubenswrapper[4875]: I1126 00:07:17.116270 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 00:07:17 crc kubenswrapper[4875]: I1126 00:07:17.116483 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:17 crc kubenswrapper[4875]: I1126 00:07:17.117771 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:17 crc kubenswrapper[4875]: I1126 00:07:17.117815 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:17 crc kubenswrapper[4875]: I1126 00:07:17.117824 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:17 crc kubenswrapper[4875]: I1126 00:07:17.123655 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 00:07:17 crc kubenswrapper[4875]: I1126 00:07:17.607920 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:17 crc kubenswrapper[4875]: I1126 00:07:17.608795 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:17 crc kubenswrapper[4875]: I1126 00:07:17.608848 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:17 crc kubenswrapper[4875]: I1126 00:07:17.608861 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:17 crc kubenswrapper[4875]: E1126 00:07:17.665975 4875 eviction_manager.go:285] "Eviction manager: failed to get summary stats" err="failed to get node info: node \"crc\" not found" Nov 26 00:07:17 crc kubenswrapper[4875]: I1126 00:07:17.702199 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 00:07:17 crc kubenswrapper[4875]: I1126 00:07:17.702380 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:17 crc kubenswrapper[4875]: I1126 00:07:17.703912 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:17 crc kubenswrapper[4875]: I1126 00:07:17.703962 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:17 crc kubenswrapper[4875]: I1126 00:07:17.703981 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:17 crc kubenswrapper[4875]: I1126 00:07:17.941499 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 00:07:18 crc kubenswrapper[4875]: I1126 00:07:18.610064 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:18 
crc kubenswrapper[4875]: I1126 00:07:18.611151 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:18 crc kubenswrapper[4875]: I1126 00:07:18.611243 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:18 crc kubenswrapper[4875]: I1126 00:07:18.611271 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:18 crc kubenswrapper[4875]: I1126 00:07:18.617553 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 00:07:19 crc kubenswrapper[4875]: I1126 00:07:19.011241 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-etcd/etcd-crc" Nov 26 00:07:19 crc kubenswrapper[4875]: I1126 00:07:19.011495 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:19 crc kubenswrapper[4875]: I1126 00:07:19.012772 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:19 crc kubenswrapper[4875]: I1126 00:07:19.012805 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:19 crc kubenswrapper[4875]: I1126 00:07:19.012815 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:19 crc kubenswrapper[4875]: I1126 00:07:19.612401 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:19 crc kubenswrapper[4875]: I1126 00:07:19.613507 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:19 crc kubenswrapper[4875]: I1126 00:07:19.613541 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:19 crc kubenswrapper[4875]: I1126 00:07:19.613552 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:20 crc kubenswrapper[4875]: I1126 00:07:20.942010 4875 patch_prober.go:28] interesting pod/kube-controller-manager-crc container/cluster-policy-controller namespace/openshift-kube-controller-manager: Startup probe status=failure output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" start-of-body= Nov 26 00:07:20 crc kubenswrapper[4875]: I1126 00:07:20.942104 4875 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-controller-manager/kube-controller-manager-crc" podUID="f614b9022728cf315e60c057852e563e" containerName="cluster-policy-controller" probeResult="failure" output="Get \"https://192.168.126.11:10357/healthz\": context deadline exceeded (Client.Timeout exceeded while awaiting headers)" Nov 26 00:07:21 crc kubenswrapper[4875]: I1126 00:07:21.719657 4875 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Nov 26 00:07:21 crc kubenswrapper[4875]: 
I1126 00:07:21.719749 4875 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Nov 26 00:07:21 crc kubenswrapper[4875]: I1126 00:07:21.724775 4875 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver namespace/openshift-kube-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 403" start-of-body={"kind":"Status","apiVersion":"v1","metadata":{},"status":"Failure","message":"forbidden: User \"system:anonymous\" cannot get path \"/livez\"","reason":"Forbidden","details":{},"code":403} Nov 26 00:07:21 crc kubenswrapper[4875]: I1126 00:07:21.724877 4875 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 403" Nov 26 00:07:23 crc kubenswrapper[4875]: I1126 00:07:23.241347 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 00:07:23 crc kubenswrapper[4875]: I1126 00:07:23.241525 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:23 crc kubenswrapper[4875]: I1126 00:07:23.242698 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:23 crc kubenswrapper[4875]: I1126 00:07:23.242739 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:23 crc kubenswrapper[4875]: I1126 00:07:23.242759 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:23 crc kubenswrapper[4875]: I1126 00:07:23.245373 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 00:07:23 crc kubenswrapper[4875]: I1126 00:07:23.629770 4875 kubelet_node_status.go:401] "Setting node annotation to enable volume controller attach/detach" Nov 26 00:07:23 crc kubenswrapper[4875]: I1126 00:07:23.630758 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:23 crc kubenswrapper[4875]: I1126 00:07:23.630804 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:23 crc kubenswrapper[4875]: I1126 00:07:23.630826 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:26 crc kubenswrapper[4875]: E1126 00:07:26.705530 4875 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": context deadline exceeded" interval="6.4s" Nov 26 00:07:26 crc kubenswrapper[4875]: I1126 00:07:26.707926 4875 trace.go:236] Trace[163560521]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (26-Nov-2025 00:07:14.848) (total time: 11859ms): Nov 26 00:07:26 crc kubenswrapper[4875]: Trace[163560521]: ---"Objects listed" error: 11859ms (00:07:26.707) Nov 26 00:07:26 crc kubenswrapper[4875]: Trace[163560521]: [11.859174012s] [11.859174012s] END Nov 26 00:07:26 crc kubenswrapper[4875]: I1126 
00:07:26.707967 4875 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Nov 26 00:07:26 crc kubenswrapper[4875]: E1126 00:07:26.710080 4875 kubelet_node_status.go:99] "Unable to register node with API server" err="nodes \"crc\" is forbidden: autoscaling.openshift.io/ManagedNode infra config cache not synchronized" node="crc" Nov 26 00:07:26 crc kubenswrapper[4875]: I1126 00:07:26.710331 4875 trace.go:236] Trace[85167937]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (26-Nov-2025 00:07:14.532) (total time: 12177ms): Nov 26 00:07:26 crc kubenswrapper[4875]: Trace[85167937]: ---"Objects listed" error: 12177ms (00:07:26.710) Nov 26 00:07:26 crc kubenswrapper[4875]: Trace[85167937]: [12.177247882s] [12.177247882s] END Nov 26 00:07:26 crc kubenswrapper[4875]: I1126 00:07:26.710386 4875 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Nov 26 00:07:26 crc kubenswrapper[4875]: I1126 00:07:26.710737 4875 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 26 00:07:26 crc kubenswrapper[4875]: I1126 00:07:26.710821 4875 trace.go:236] Trace[509447031]: "Reflector ListAndWatch" name:k8s.io/client-go/informers/factory.go:160 (26-Nov-2025 00:07:15.905) (total time: 10805ms): Nov 26 00:07:26 crc kubenswrapper[4875]: Trace[509447031]: ---"Objects listed" error: 10805ms (00:07:26.710) Nov 26 00:07:26 crc kubenswrapper[4875]: Trace[509447031]: [10.805671705s] [10.805671705s] END Nov 26 00:07:26 crc kubenswrapper[4875]: I1126 00:07:26.710930 4875 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 26 00:07:26 crc kubenswrapper[4875]: I1126 00:07:26.737598 4875 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Liveness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:37296->192.168.126.11:17697: read: connection reset by peer" start-of-body= Nov 26 00:07:26 crc kubenswrapper[4875]: I1126 00:07:26.737673 4875 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:37296->192.168.126.11:17697: read: connection reset by peer" Nov 26 00:07:26 crc kubenswrapper[4875]: I1126 00:07:26.737707 4875 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe status=failure output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:37312->192.168.126.11:17697: read: connection reset by peer" start-of-body= Nov 26 00:07:26 crc kubenswrapper[4875]: I1126 00:07:26.737786 4875 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": read tcp 192.168.126.11:37312->192.168.126.11:17697: read: connection reset by peer" Nov 26 00:07:26 crc kubenswrapper[4875]: I1126 00:07:26.738212 4875 patch_prober.go:28] interesting pod/kube-apiserver-crc container/kube-apiserver-check-endpoints namespace/openshift-kube-apiserver: Readiness probe 
status=failure output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" start-of-body= Nov 26 00:07:26 crc kubenswrapper[4875]: I1126 00:07:26.738254 4875 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" probeResult="failure" output="Get \"https://192.168.126.11:17697/healthz\": dial tcp 192.168.126.11:17697: connect: connection refused" Nov 26 00:07:26 crc kubenswrapper[4875]: I1126 00:07:26.759733 4875 reconstruct.go:205] "DevicePaths of reconstructed volumes updated" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.440793 4875 apiserver.go:52] "Watching apiserver" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.444110 4875 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.444381 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-network-operator/iptables-alerter-4ln5h","openshift-network-operator/network-operator-58b4c7f79c-55gtf","openshift-dns/node-resolver-hjshz","openshift-network-console/networking-console-plugin-85b44fc459-gdk6g","openshift-network-diagnostics/network-check-source-55646444c4-trplf","openshift-network-diagnostics/network-check-target-xd92c","openshift-network-node-identity/network-node-identity-vrzqb"] Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.444787 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.444809 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:07:27 crc kubenswrapper[4875]: E1126 00:07:27.444873 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:07:27 crc kubenswrapper[4875]: E1126 00:07:27.444898 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.444809 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.445094 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.445145 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.445208 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:07:27 crc kubenswrapper[4875]: E1126 00:07:27.445270 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.445282 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/node-resolver-hjshz" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.447187 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.447216 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.447223 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.447394 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.447463 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.448011 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.448075 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.448077 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.448113 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.448586 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.448959 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.449732 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.456684 4875 desired_state_of_world_populator.go:154] "Finished populating initial desired state of world" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.462881 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.462919 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") pod \"49ef4625-1d3a-4a9f-b595-c2433d32326d\" (UID: \"49ef4625-1d3a-4a9f-b595-c2433d32326d\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.462945 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.463248 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.463297 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.463335 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.463351 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.463349 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v" (OuterVolumeSpecName: "kube-api-access-pjr6v") pod "49ef4625-1d3a-4a9f-b595-c2433d32326d" (UID: "49ef4625-1d3a-4a9f-b595-c2433d32326d"). InnerVolumeSpecName "kube-api-access-pjr6v". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.463584 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "profile-collector-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.463613 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.463648 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.463669 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.463683 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.463762 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx" (OuterVolumeSpecName: "kube-api-access-d6qdx") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "kube-api-access-d6qdx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.463861 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config" (OuterVolumeSpecName: "config") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.464131 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config" (OuterVolumeSpecName: "config") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.464177 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.464233 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.464303 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs" (OuterVolumeSpecName: "tmpfs") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "tmpfs". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.464498 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert" (OuterVolumeSpecName: "profile-collector-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "profile-collector-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.464555 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.464594 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.464618 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.464640 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") pod \"44663579-783b-4372-86d6-acf235a62d72\" (UID: \"44663579-783b-4372-86d6-acf235a62d72\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.464650 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config" (OuterVolumeSpecName: "config") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.464958 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.465199 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc" (OuterVolumeSpecName: "kube-api-access-vt5rc") pod "44663579-783b-4372-86d6-acf235a62d72" (UID: "44663579-783b-4372-86d6-acf235a62d72"). InnerVolumeSpecName "kube-api-access-vt5rc". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.465216 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.465540 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.465778 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.465976 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.466045 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.465964 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca" (OuterVolumeSpecName: "image-import-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "image-import-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.466247 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.466168 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.466617 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-router-certs". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.466632 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.466971 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.467176 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.467387 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.467599 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.467774 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.468012 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.468183 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.468346 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.468541 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-9xfj7\" (UniqueName: 
\"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.468690 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.468850 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.469027 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.469193 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.469507 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.469677 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.469840 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.470008 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.470163 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.470313 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.470522 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.470690 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.467496 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls" (OuterVolumeSpecName: "machine-approver-tls") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "machine-approver-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.467623 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh" (OuterVolumeSpecName: "kube-api-access-2w9zh") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "kube-api-access-2w9zh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.468129 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp" (OuterVolumeSpecName: "kube-api-access-fcqwp") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "kube-api-access-fcqwp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.470863 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls" (OuterVolumeSpecName: "machine-api-operator-tls") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "machine-api-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.468197 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "trusted-ca-bundle". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.470882 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config" (OuterVolumeSpecName: "config") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.468366 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.468424 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.468454 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.468638 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert" (OuterVolumeSpecName: "cert") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.468654 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp" (OuterVolumeSpecName: "kube-api-access-ngvvp") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "kube-api-access-ngvvp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.468925 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh" (OuterVolumeSpecName: "kube-api-access-xcgwh") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "kube-api-access-xcgwh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.468987 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7" (OuterVolumeSpecName: "kube-api-access-9xfj7") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). 
InnerVolumeSpecName "kube-api-access-9xfj7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.469121 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.469369 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.469465 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.469525 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.470070 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6" (OuterVolumeSpecName: "kube-api-access-htfz6") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "kube-api-access-htfz6". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.470069 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.470582 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.470818 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.471453 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.471577 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.471611 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.471653 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.471678 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.471697 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.471716 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.471735 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" 
(UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.471754 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.471773 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.471793 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.471814 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.471833 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-4d4hj\" (UniqueName: \"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.471851 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.471892 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.471910 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.471930 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.471948 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: 
\"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.471966 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.471983 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") pod \"20b0d48f-5fd6-431c-a545-e3c800c7b866\" (UID: \"20b0d48f-5fd6-431c-a545-e3c800c7b866\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.472002 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.472018 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.472033 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") pod \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\" (UID: \"b6cd30de-2eeb-49a2-ab40-9167f4560ff5\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.472053 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.472070 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.472084 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.472102 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") pod \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\" (UID: \"96b93a3a-6083-4aea-8eab-fe1aa8245ad9\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.472244 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume 
"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88" (OuterVolumeSpecName: "kube-api-access-lzf88") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "kube-api-access-lzf88". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.472118 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.472318 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.472335 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.472354 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.472438 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf" (OuterVolumeSpecName: "kube-api-access-7c4vf") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "kube-api-access-7c4vf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.472482 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh" (OuterVolumeSpecName: "kube-api-access-x4zgh") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "kube-api-access-x4zgh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.472517 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs" (OuterVolumeSpecName: "kube-api-access-pcxfs") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "kube-api-access-pcxfs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.472540 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities" (OuterVolumeSpecName: "utilities") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.472571 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert" (OuterVolumeSpecName: "console-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.472716 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.472739 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.472753 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.472769 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.472784 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.472800 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.472817 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.472834 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lz9wn\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.472849 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: 
\"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.472864 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.472879 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.472878 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.472898 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.472915 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.472929 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.472944 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.472959 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.472975 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 00:07:27 crc 
kubenswrapper[4875]: I1126 00:07:27.472990 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473005 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473020 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473033 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473049 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") pod \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\" (UID: \"3ab1a177-2de0-46d9-b765-d0d0649bb42e\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473064 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473078 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") pod \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\" (UID: \"1386a44e-36a2-460c-96d0-0359d2b6f0f5\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473092 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473107 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473121 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") 
" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473136 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473150 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473165 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473180 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473196 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473214 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473230 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473245 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473261 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") pod \"22c825df-677d-4ca6-82db-3454ed06e783\" (UID: \"22c825df-677d-4ca6-82db-3454ed06e783\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473277 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: 
\"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473295 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473351 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473368 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") pod \"5fe579f8-e8a6-4643-bce5-a661393c4dde\" (UID: \"5fe579f8-e8a6-4643-bce5-a661393c4dde\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473383 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473397 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473427 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") pod \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\" (UID: \"f88749ec-7931-4ee7-b3fc-1ec5e11f92e9\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473444 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473533 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") pod \"87cf06ed-a83f-41a7-828d-70653580a8cb\" (UID: \"87cf06ed-a83f-41a7-828d-70653580a8cb\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473549 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473570 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") pod 
\"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473586 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473602 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473619 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473634 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473651 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473681 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473700 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473715 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473732 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") pod \"7bb08738-c794-4ee8-9972-3a62ca171029\" (UID: \"7bb08738-c794-4ee8-9972-3a62ca171029\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473748 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473763 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473782 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473798 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473814 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") pod \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\" (UID: \"308be0ea-9f5f-4b29-aeb1-5abd31a0b17b\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473829 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473845 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") pod \"496e6271-fb68-4057-954e-a0d97a4afa3f\" (UID: \"496e6271-fb68-4057-954e-a0d97a4afa3f\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473862 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") pod \"43509403-f426-496e-be36-56cef71462f5\" (UID: \"43509403-f426-496e-be36-56cef71462f5\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473879 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") pod \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\" (UID: \"25e176fe-21b4-4974-b1ed-c8b94f112a7f\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473898 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473914 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8tdtz\" (UniqueName: 
\"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473929 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473948 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") pod \"6402fda4-df10-493c-b4e5-d0569419652d\" (UID: \"6402fda4-df10-493c-b4e5-d0569419652d\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473963 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473979 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473995 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.474012 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.474076 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.474095 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") pod \"5b88f790-22fa-440e-b583-365168c0b23d\" (UID: \"5b88f790-22fa-440e-b583-365168c0b23d\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.474453 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.474474 
4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.474490 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") pod \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\" (UID: \"09ae3b1a-e8e7-4524-b54b-61eab6f9239a\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.474504 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.474519 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.474534 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.474548 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") pod \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\" (UID: \"8cea82b4-6893-4ddc-af9f-1bb5ae425c5b\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.474567 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.474582 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") pod \"09efc573-dbb6-4249-bd59-9b87aba8dd28\" (UID: \"09efc573-dbb6-4249-bd59-9b87aba8dd28\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.474597 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.474613 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 
00:07:27.474628 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") pod \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\" (UID: \"b11524ee-3fca-4b1b-9cdf-6da289fdbc7d\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.474642 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") pod \"7583ce53-e0fe-4a16-9e4d-50516596a136\" (UID: \"7583ce53-e0fe-4a16-9e4d-50516596a136\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.474660 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") pod \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\" (UID: \"cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.474675 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") pod \"e7e6199b-1264-4501-8953-767f51328d08\" (UID: \"e7e6199b-1264-4501-8953-767f51328d08\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.474696 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") pod \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\" (UID: \"49c341d1-5089-4bc2-86a0-a5e165cfcc6b\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.474715 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.474731 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.474746 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.474762 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.474777 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 00:07:27 crc 
kubenswrapper[4875]: I1126 00:07:27.474797 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.474812 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") pod \"5441d097-087c-4d9a-baa8-b210afa90fc9\" (UID: \"5441d097-087c-4d9a-baa8-b210afa90fc9\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.474826 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") pod \"bf126b07-da06-4140-9a57-dfd54fc6b486\" (UID: \"bf126b07-da06-4140-9a57-dfd54fc6b486\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.474841 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.474856 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") pod \"01ab3dd5-8196-46d0-ad33-122e2ca51def\" (UID: \"01ab3dd5-8196-46d0-ad33-122e2ca51def\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.474870 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") pod \"31d8b7a1-420e-4252-a5b7-eebe8a111292\" (UID: \"31d8b7a1-420e-4252-a5b7-eebe8a111292\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.474886 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") pod \"0b574797-001e-440a-8f4e-c0be86edad0f\" (UID: \"0b574797-001e-440a-8f4e-c0be86edad0f\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.474900 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") pod \"1bf7eb37-55a3-4c65-b768-a94c82151e69\" (UID: \"1bf7eb37-55a3-4c65-b768-a94c82151e69\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.474915 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") pod \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\" (UID: \"bc5039c0-ea34-426b-a2b7-fbbc87b49a6d\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.474930 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") pod \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\" (UID: \"b6312bbd-5731-4ea0-a20f-81d5a57df44a\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.474945 4875 
reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") pod \"9d4552c7-cd75-42dd-8880-30dd377c49a4\" (UID: \"9d4552c7-cd75-42dd-8880-30dd377c49a4\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.474960 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") pod \"7539238d-5fe0-46ed-884e-1c3b566537ec\" (UID: \"7539238d-5fe0-46ed-884e-1c3b566537ec\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.474974 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") pod \"925f1c65-6136-48ba-85aa-3a3b50560753\" (UID: \"925f1c65-6136-48ba-85aa-3a3b50560753\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.474989 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") pod \"efdd0498-1daa-4136-9a4a-3b948c2293fc\" (UID: \"efdd0498-1daa-4136-9a4a-3b948c2293fc\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475004 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") pod \"4bb40260-dbaa-4fb0-84df-5e680505d512\" (UID: \"4bb40260-dbaa-4fb0-84df-5e680505d512\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475019 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") pod \"5225d0e4-402f-4861-b410-819f433b1803\" (UID: \"5225d0e4-402f-4861-b410-819f433b1803\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475034 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") pod \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\" (UID: \"c03ee662-fb2f-4fc4-a2c1-af487c19d254\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475050 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") pod \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\" (UID: \"3cb93b32-e0ae-4377-b9c8-fdb9842c6d59\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475065 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") pod \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\" (UID: \"210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475080 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") pod \"0b78653f-4ff9-4508-8672-245ed9b561e3\" (UID: \"0b78653f-4ff9-4508-8672-245ed9b561e3\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475096 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for 
volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") pod \"6509e943-70c6-444c-bc41-48a544e36fbd\" (UID: \"6509e943-70c6-444c-bc41-48a544e36fbd\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475111 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") pod \"6ea678ab-3438-413e-bfe3-290ae7725660\" (UID: \"6ea678ab-3438-413e-bfe3-290ae7725660\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475126 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") pod \"1d611f23-29be-4491-8495-bee1670e935f\" (UID: \"1d611f23-29be-4491-8495-bee1670e935f\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475142 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") pod \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\" (UID: \"bd23aa5c-e532-4e53-bccf-e79f130c5ae8\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475159 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475174 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") pod \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\" (UID: \"a0128f3a-b052-44ed-a84e-c4c8aaf17c13\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475189 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") pod \"6731426b-95fe-49ff-bb5f-40441049fde2\" (UID: \"6731426b-95fe-49ff-bb5f-40441049fde2\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475205 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") pod \"fda69060-fa79-4696-b1a6-7980f124bf7c\" (UID: \"fda69060-fa79-4696-b1a6-7980f124bf7c\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475220 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") pod \"a31745f5-9847-4afe-82a5-3161cc66ca93\" (UID: \"a31745f5-9847-4afe-82a5-3161cc66ca93\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475236 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") pod \"57a731c4-ef35-47a8-b875-bfb08a7f8011\" (UID: \"57a731c4-ef35-47a8-b875-bfb08a7f8011\") " Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 
00:07:27.475276 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475299 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475318 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475337 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475358 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475378 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475395 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/5e5a9914-f410-4b51-8e16-77e18f7b09fb-hosts-file\") pod \"node-resolver-hjshz\" (UID: \"5e5a9914-f410-4b51-8e16-77e18f7b09fb\") " pod="openshift-dns/node-resolver-hjshz" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475431 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475451 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod 
\"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475468 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475487 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475503 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475521 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mm8dm\" (UniqueName: \"kubernetes.io/projected/5e5a9914-f410-4b51-8e16-77e18f7b09fb-kube-api-access-mm8dm\") pod \"node-resolver-hjshz\" (UID: \"5e5a9914-f410-4b51-8e16-77e18f7b09fb\") " pod="openshift-dns/node-resolver-hjshz" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475538 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475562 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475582 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475654 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pjr6v\" (UniqueName: \"kubernetes.io/projected/49ef4625-1d3a-4a9f-b595-c2433d32326d-kube-api-access-pjr6v\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475666 4875 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/7539238d-5fe0-46ed-884e-1c3b566537ec-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475676 4875 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/496e6271-fb68-4057-954e-a0d97a4afa3f-config\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475686 4875 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475697 4875 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475711 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d6qdx\" (UniqueName: \"kubernetes.io/projected/87cf06ed-a83f-41a7-828d-70653580a8cb-kube-api-access-d6qdx\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475724 4875 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-config\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475733 4875 reconciler_common.go:293] "Volume detached for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-tmpfs\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475742 4875 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1386a44e-36a2-460c-96d0-0359d2b6f0f5-config\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475751 4875 reconciler_common.go:293] "Volume detached for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-profile-collector-cert\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475765 4875 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475774 4875 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475782 4875 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475792 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vt5rc\" (UniqueName: \"kubernetes.io/projected/44663579-783b-4372-86d6-acf235a62d72-kube-api-access-vt5rc\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475801 4875 reconciler_common.go:293] "Volume detached for volume \"image-import-ca\" (UniqueName: 
\"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-image-import-ca\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475813 4875 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/6ea678ab-3438-413e-bfe3-290ae7725660-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475823 4875 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/31d8b7a1-420e-4252-a5b7-eebe8a111292-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475833 4875 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475842 4875 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/e7e6199b-1264-4501-8953-767f51328d08-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475851 4875 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475885 4875 reconciler_common.go:293] "Volume detached for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/22c825df-677d-4ca6-82db-3454ed06e783-machine-approver-tls\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475894 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2w9zh\" (UniqueName: \"kubernetes.io/projected/4bb40260-dbaa-4fb0-84df-5e680505d512-kube-api-access-2w9zh\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475903 4875 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475917 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fcqwp\" (UniqueName: \"kubernetes.io/projected/5fe579f8-e8a6-4643-bce5-a661393c4dde-kube-api-access-fcqwp\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475926 4875 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475935 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ngvvp\" (UniqueName: \"kubernetes.io/projected/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-kube-api-access-ngvvp\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475945 4875 reconciler_common.go:293] "Volume detached for volume \"cert\" (UniqueName: \"kubernetes.io/secret/20b0d48f-5fd6-431c-a545-e3c800c7b866-cert\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475954 4875 reconciler_common.go:293] "Volume detached for volume 
\"kube-api-access-9xfj7\" (UniqueName: \"kubernetes.io/projected/5225d0e4-402f-4861-b410-819f433b1803-kube-api-access-9xfj7\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475963 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcgwh\" (UniqueName: \"kubernetes.io/projected/fda69060-fa79-4696-b1a6-7980f124bf7c-kube-api-access-xcgwh\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475971 4875 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475980 4875 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475989 4875 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/01ab3dd5-8196-46d0-ad33-122e2ca51def-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475998 4875 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.476007 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-htfz6\" (UniqueName: \"kubernetes.io/projected/6ea678ab-3438-413e-bfe3-290ae7725660-kube-api-access-htfz6\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.476016 4875 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-config\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.476024 4875 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.476033 4875 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.476043 4875 reconciler_common.go:293] "Volume detached for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/6402fda4-df10-493c-b4e5-d0569419652d-machine-api-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.476052 4875 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.476116 4875 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc 
kubenswrapper[4875]: I1126 00:07:27.476125 4875 reconciler_common.go:293] "Volume detached for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.476133 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x4zgh\" (UniqueName: \"kubernetes.io/projected/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-kube-api-access-x4zgh\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.476143 4875 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.476156 4875 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.476165 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lzf88\" (UniqueName: \"kubernetes.io/projected/0b574797-001e-440a-8f4e-c0be86edad0f-kube-api-access-lzf88\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.476174 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-7c4vf\" (UniqueName: \"kubernetes.io/projected/22c825df-677d-4ca6-82db-3454ed06e783-kube-api-access-7c4vf\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.476183 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pcxfs\" (UniqueName: \"kubernetes.io/projected/9d4552c7-cd75-42dd-8880-30dd377c49a4-kube-api-access-pcxfs\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.472881 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images" (OuterVolumeSpecName: "images") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "images". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473164 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473336 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7" (OuterVolumeSpecName: "kube-api-access-nzwt7") pod "96b93a3a-6083-4aea-8eab-fe1aa8245ad9" (UID: "96b93a3a-6083-4aea-8eab-fe1aa8245ad9"). InnerVolumeSpecName "kube-api-access-nzwt7". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473461 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy" (OuterVolumeSpecName: "cni-binary-copy") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "cni-binary-copy". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473605 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5" (OuterVolumeSpecName: "kube-api-access-zgdk5") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "kube-api-access-zgdk5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473469 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473665 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj" (OuterVolumeSpecName: "kube-api-access-4d4hj") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "kube-api-access-4d4hj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473686 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473772 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473751 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz" (OuterVolumeSpecName: "kube-api-access-6g6sz") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "kube-api-access-6g6sz". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473841 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh" (OuterVolumeSpecName: "kube-api-access-x7zkh") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "kube-api-access-x7zkh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473895 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd" (OuterVolumeSpecName: "kube-api-access-mnrrd") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "kube-api-access-mnrrd". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.481114 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473932 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.473937 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.474060 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn" (OuterVolumeSpecName: "kube-api-access-lz9wn") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "kube-api-access-lz9wn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475314 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs" (OuterVolumeSpecName: "certs") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.475354 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt" (OuterVolumeSpecName: "kube-api-access-fqsjt") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). 
InnerVolumeSpecName "kube-api-access-fqsjt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.477865 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52" (OuterVolumeSpecName: "kube-api-access-s4n52") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "kube-api-access-s4n52". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.478022 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.478401 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.478559 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.478943 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds" (OuterVolumeSpecName: "kube-api-access-w9rds") pod "20b0d48f-5fd6-431c-a545-e3c800c7b866" (UID: "20b0d48f-5fd6-431c-a545-e3c800c7b866"). InnerVolumeSpecName "kube-api-access-w9rds". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.479217 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.479385 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "encryption-config". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.479539 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782" (OuterVolumeSpecName: "kube-api-access-pj782") pod "b6cd30de-2eeb-49a2-ab40-9167f4560ff5" (UID: "b6cd30de-2eeb-49a2-ab40-9167f4560ff5"). InnerVolumeSpecName "kube-api-access-pj782". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.479619 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert" (OuterVolumeSpecName: "apiservice-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "apiservice-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.479953 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities" (OuterVolumeSpecName: "utilities") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.480303 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities" (OuterVolumeSpecName: "utilities") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.480629 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token" (OuterVolumeSpecName: "node-bootstrap-token") pod "5fe579f8-e8a6-4643-bce5-a661393c4dde" (UID: "5fe579f8-e8a6-4643-bce5-a661393c4dde"). InnerVolumeSpecName "node-bootstrap-token". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.480709 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config" (OuterVolumeSpecName: "config") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.480893 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.481117 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4" (OuterVolumeSpecName: "kube-api-access-w4xd4") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "kube-api-access-w4xd4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.481402 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.481444 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit" (OuterVolumeSpecName: "audit") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "audit". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.481554 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.482647 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg" (OuterVolumeSpecName: "kube-api-access-dbsvg") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "kube-api-access-dbsvg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.479932 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "1386a44e-36a2-460c-96d0-0359d2b6f0f5" (UID: "1386a44e-36a2-460c-96d0-0359d2b6f0f5"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.482883 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.482901 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb" (OuterVolumeSpecName: "kube-api-access-279lb") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "kube-api-access-279lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.482913 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert" (OuterVolumeSpecName: "oauth-serving-cert") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "oauth-serving-cert". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.483093 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config" (OuterVolumeSpecName: "mcd-auth-proxy-config") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "mcd-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.476996 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"iptables-alerter-script\" (UniqueName: \"kubernetes.io/configmap/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-iptables-alerter-script\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.483239 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle" (OuterVolumeSpecName: "trusted-ca-bundle") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.483762 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert" (OuterVolumeSpecName: "package-server-manager-serving-cert") pod "3ab1a177-2de0-46d9-b765-d0d0649bb42e" (UID: "3ab1a177-2de0-46d9-b765-d0d0649bb42e"). InnerVolumeSpecName "package-server-manager-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.483799 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca" (OuterVolumeSpecName: "etcd-service-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.483871 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates" (OuterVolumeSpecName: "available-featuregates") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "available-featuregates". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.483909 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8" (OuterVolumeSpecName: "kube-api-access-wxkg8") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "kube-api-access-wxkg8". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.484213 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.484228 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.484231 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume" (OuterVolumeSpecName: "config-volume") pod "87cf06ed-a83f-41a7-828d-70653580a8cb" (UID: "87cf06ed-a83f-41a7-828d-70653580a8cb"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.484554 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config" (OuterVolumeSpecName: "config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.484705 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp" (OuterVolumeSpecName: "kube-api-access-qs4fp") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "kube-api-access-qs4fp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.484700 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: E1126 00:07:27.484945 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:07:27.984874456 +0000 UTC m=+21.174850151 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.484976 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.485016 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c" (OuterVolumeSpecName: "kube-api-access-tk88c") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "kube-api-access-tk88c". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.485043 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config" (OuterVolumeSpecName: "auth-proxy-config") pod "22c825df-677d-4ca6-82db-3454ed06e783" (UID: "22c825df-677d-4ca6-82db-3454ed06e783"). 
InnerVolumeSpecName "auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.484939 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities" (OuterVolumeSpecName: "utilities") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.485266 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.485292 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.485702 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert" (OuterVolumeSpecName: "webhook-cert") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "webhook-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.485775 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca" (OuterVolumeSpecName: "client-ca") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.486033 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn" (OuterVolumeSpecName: "kube-api-access-jkwtn") pod "5b88f790-22fa-440e-b583-365168c0b23d" (UID: "5b88f790-22fa-440e-b583-365168c0b23d"). InnerVolumeSpecName "kube-api-access-jkwtn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.486176 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.486334 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images" (OuterVolumeSpecName: "images") pod "31d8b7a1-420e-4252-a5b7-eebe8a111292" (UID: "31d8b7a1-420e-4252-a5b7-eebe8a111292"). InnerVolumeSpecName "images". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.486356 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.487142 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl" (OuterVolumeSpecName: "kube-api-access-xcphl") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "kube-api-access-xcphl". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.487208 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle" (OuterVolumeSpecName: "signing-cabundle") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-cabundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.487277 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config" (OuterVolumeSpecName: "config") pod "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" (UID: "8cea82b4-6893-4ddc-af9f-1bb5ae425c5b"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.487437 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.487451 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz" (OuterVolumeSpecName: "kube-api-access-2d4wz") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "kube-api-access-2d4wz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.487589 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85" (OuterVolumeSpecName: "kube-api-access-x2m85") pod "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" (UID: "cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d"). InnerVolumeSpecName "kube-api-access-x2m85". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.487591 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth" (OuterVolumeSpecName: "stats-auth") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). 
InnerVolumeSpecName "stats-auth". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.487638 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" (UID: "f88749ec-7931-4ee7-b3fc-1ec5e11f92e9"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.487707 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.487884 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk" (OuterVolumeSpecName: "kube-api-access-rnphk") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "kube-api-access-rnphk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.487955 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key" (OuterVolumeSpecName: "signing-key") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "signing-key". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.487972 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca" (OuterVolumeSpecName: "etcd-ca") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.488172 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.486814 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls" (OuterVolumeSpecName: "image-registry-operator-tls") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "image-registry-operator-tls". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.488666 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.488727 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs" (OuterVolumeSpecName: "webhook-certs") pod "efdd0498-1daa-4136-9a4a-3b948c2293fc" (UID: "efdd0498-1daa-4136-9a4a-3b948c2293fc"). InnerVolumeSpecName "webhook-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.488875 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config" (OuterVolumeSpecName: "multus-daemon-config") pod "4bb40260-dbaa-4fb0-84df-5e680505d512" (UID: "4bb40260-dbaa-4fb0-84df-5e680505d512"). InnerVolumeSpecName "multus-daemon-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.489022 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "6509e943-70c6-444c-bc41-48a544e36fbd" (UID: "6509e943-70c6-444c-bc41-48a544e36fbd"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.489215 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca" (OuterVolumeSpecName: "etcd-serving-ca") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "etcd-serving-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.489455 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert" (OuterVolumeSpecName: "ovn-control-plane-metrics-cert") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "ovn-control-plane-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.489524 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-system-cliconfig". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.489536 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs" (OuterVolumeSpecName: "metrics-certs") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "metrics-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.490528 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls" (OuterVolumeSpecName: "control-plane-machine-set-operator-tls") pod "6731426b-95fe-49ff-bb5f-40441049fde2" (UID: "6731426b-95fe-49ff-bb5f-40441049fde2"). InnerVolumeSpecName "control-plane-machine-set-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.490655 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca" (OuterVolumeSpecName: "service-ca") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.490799 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" (UID: "bc5039c0-ea34-426b-a2b7-fbbc87b49a6d"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.490968 4875 swap_util.go:74] "error creating dir to test if tmpfs noswap is enabled. Assuming not supported" mount path="" error="stat /var/lib/kubelet/plugins/kubernetes.io/empty-dir: no such file or directory" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.491015 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "925f1c65-6136-48ba-85aa-3a3b50560753" (UID: "925f1c65-6136-48ba-85aa-3a3b50560753"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.491057 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "496e6271-fb68-4057-954e-a0d97a4afa3f" (UID: "496e6271-fb68-4057-954e-a0d97a4afa3f"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.491130 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr" (OuterVolumeSpecName: "kube-api-access-249nr") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "kube-api-access-249nr". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.491498 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "bf126b07-da06-4140-9a57-dfd54fc6b486" (UID: "bf126b07-da06-4140-9a57-dfd54fc6b486"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.491881 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7" (OuterVolumeSpecName: "kube-api-access-kfwg7") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "kube-api-access-kfwg7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.492121 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.492197 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config" (OuterVolumeSpecName: "config") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.492313 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca" (OuterVolumeSpecName: "serviceca") pod "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" (UID: "3cb93b32-e0ae-4377-b9c8-fdb9842c6d59"). InnerVolumeSpecName "serviceca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.492317 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct" (OuterVolumeSpecName: "kube-api-access-cfbct") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "kube-api-access-cfbct". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.492611 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8" (OuterVolumeSpecName: "kube-api-access-6ccd8") pod "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" (UID: "308be0ea-9f5f-4b29-aeb1-5abd31a0b17b"). InnerVolumeSpecName "kube-api-access-6ccd8". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.492685 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config" (OuterVolumeSpecName: "mcc-auth-proxy-config") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). 
InnerVolumeSpecName "mcc-auth-proxy-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: E1126 00:07:27.492721 4875 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.492753 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv" (OuterVolumeSpecName: "kube-api-access-d4lsv") pod "25e176fe-21b4-4974-b1ed-c8b94f112a7f" (UID: "25e176fe-21b4-4974-b1ed-c8b94f112a7f"). InnerVolumeSpecName "kube-api-access-d4lsv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: E1126 00:07:27.492812 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 00:07:27.992781486 +0000 UTC m=+21.182757181 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 00:07:27 crc kubenswrapper[4875]: E1126 00:07:27.492812 4875 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.492830 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca" (OuterVolumeSpecName: "client-ca") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.492959 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m" (OuterVolumeSpecName: "kube-api-access-gf66m") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "kube-api-access-gf66m". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.493230 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "0b574797-001e-440a-8f4e-c0be86edad0f" (UID: "0b574797-001e-440a-8f4e-c0be86edad0f"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: E1126 00:07:27.493246 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 00:07:27.993230118 +0000 UTC m=+21.183205813 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.493430 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv" (OuterVolumeSpecName: "kube-api-access-zkvpv") pod "09ae3b1a-e8e7-4524-b54b-61eab6f9239a" (UID: "09ae3b1a-e8e7-4524-b54b-61eab6f9239a"). InnerVolumeSpecName "kube-api-access-zkvpv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.493817 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls" (OuterVolumeSpecName: "proxy-tls") pod "fda69060-fa79-4696-b1a6-7980f124bf7c" (UID: "fda69060-fa79-4696-b1a6-7980f124bf7c"). InnerVolumeSpecName "proxy-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.493920 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate" (OuterVolumeSpecName: "default-certificate") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "default-certificate". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.493967 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca" (OuterVolumeSpecName: "service-ca") pod "0b78653f-4ff9-4508-8672-245ed9b561e3" (UID: "0b78653f-4ff9-4508-8672-245ed9b561e3"). InnerVolumeSpecName "service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.494171 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist" (OuterVolumeSpecName: "cni-sysctl-allowlist") pod "7bb08738-c794-4ee8-9972-3a62ca171029" (UID: "7bb08738-c794-4ee8-9972-3a62ca171029"). InnerVolumeSpecName "cni-sysctl-allowlist". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.494282 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config" (OuterVolumeSpecName: "config") pod "7539238d-5fe0-46ed-884e-1c3b566537ec" (UID: "7539238d-5fe0-46ed-884e-1c3b566537ec"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.494321 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "registry-certificates". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.494406 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle" (OuterVolumeSpecName: "service-ca-bundle") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "service-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.501613 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.501859 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/37a5e44f-9a88-4405-be8a-b645485e7312-metrics-tls\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.502537 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "9d4552c7-cd75-42dd-8880-30dd377c49a4" (UID: "9d4552c7-cd75-42dd-8880-30dd377c49a4"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.502643 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-error". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.502708 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "49c341d1-5089-4bc2-86a0-a5e165cfcc6b" (UID: "49c341d1-5089-4bc2-86a0-a5e165cfcc6b"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.504771 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config" (OuterVolumeSpecName: "encryption-config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "encryption-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.505018 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz" (OuterVolumeSpecName: "kube-api-access-bf2bz") pod "1d611f23-29be-4491-8495-bee1670e935f" (UID: "1d611f23-29be-4491-8495-bee1670e935f"). 
InnerVolumeSpecName "kube-api-access-bf2bz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.505178 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "6ea678ab-3438-413e-bfe3-290ae7725660" (UID: "6ea678ab-3438-413e-bfe3-290ae7725660"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.505183 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7" (OuterVolumeSpecName: "kube-api-access-sb6h7") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "kube-api-access-sb6h7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.506389 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert" (OuterVolumeSpecName: "srv-cert") pod "b6312bbd-5731-4ea0-a20f-81d5a57df44a" (UID: "b6312bbd-5731-4ea0-a20f-81d5a57df44a"). InnerVolumeSpecName "srv-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.506915 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf" (OuterVolumeSpecName: "kube-api-access-v47cf") pod "c03ee662-fb2f-4fc4-a2c1-af487c19d254" (UID: "c03ee662-fb2f-4fc4-a2c1-af487c19d254"). InnerVolumeSpecName "kube-api-access-v47cf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: E1126 00:07:27.507009 4875 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 00:07:27 crc kubenswrapper[4875]: E1126 00:07:27.507028 4875 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 00:07:27 crc kubenswrapper[4875]: E1126 00:07:27.507041 4875 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 00:07:27 crc kubenswrapper[4875]: E1126 00:07:27.507096 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 00:07:28.00707632 +0000 UTC m=+21.197052135 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.507239 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.507883 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/ef543e1b-8068-4ea3-b32a-61027b32e95d-webhook-cert\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.508212 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-identity-cm\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-ovnkube-identity-cm\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.506517 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/ef543e1b-8068-4ea3-b32a-61027b32e95d-env-overrides\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.508465 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config" (OuterVolumeSpecName: "config") pod "1bf7eb37-55a3-4c65-b768-a94c82151e69" (UID: "1bf7eb37-55a3-4c65-b768-a94c82151e69"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.508500 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config" (OuterVolumeSpecName: "config") pod "e7e6199b-1264-4501-8953-767f51328d08" (UID: "e7e6199b-1264-4501-8953-767f51328d08"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.508816 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2" (OuterVolumeSpecName: "kube-api-access-jhbk2") pod "bd23aa5c-e532-4e53-bccf-e79f130c5ae8" (UID: "bd23aa5c-e532-4e53-bccf-e79f130c5ae8"). InnerVolumeSpecName "kube-api-access-jhbk2". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.514840 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config" (OuterVolumeSpecName: "config") pod "5441d097-087c-4d9a-baa8-b210afa90fc9" (UID: "5441d097-087c-4d9a-baa8-b210afa90fc9"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.516040 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb" (OuterVolumeSpecName: "kube-api-access-mg5zb") pod "6402fda4-df10-493c-b4e5-d0569419652d" (UID: "6402fda4-df10-493c-b4e5-d0569419652d"). InnerVolumeSpecName "kube-api-access-mg5zb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.516059 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5" (OuterVolumeSpecName: "kube-api-access-qg5z5") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "kube-api-access-qg5z5". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.516882 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config" (OuterVolumeSpecName: "console-oauth-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-oauth-config". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.517068 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.519083 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.519110 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.519287 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls" (OuterVolumeSpecName: "samples-operator-tls") pod "a0128f3a-b052-44ed-a84e-c4c8aaf17c13" (UID: "a0128f3a-b052-44ed-a84e-c4c8aaf17c13"). InnerVolumeSpecName "samples-operator-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.521683 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" (UID: "210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c"). InnerVolumeSpecName "serving-cert". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.523936 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rdwmf\" (UniqueName: \"kubernetes.io/projected/37a5e44f-9a88-4405-be8a-b645485e7312-kube-api-access-rdwmf\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 00:07:27 crc kubenswrapper[4875]: E1126 00:07:27.524924 4875 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 00:07:27 crc kubenswrapper[4875]: E1126 00:07:27.524988 4875 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 00:07:27 crc kubenswrapper[4875]: E1126 00:07:27.525007 4875 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 00:07:27 crc kubenswrapper[4875]: E1126 00:07:27.525091 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 00:07:28.025065478 +0000 UTC m=+21.215041383 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.525608 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config" (OuterVolumeSpecName: "console-config") pod "43509403-f426-496e-be36-56cef71462f5" (UID: "43509403-f426-496e-be36-56cef71462f5"). InnerVolumeSpecName "console-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.525951 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls" (OuterVolumeSpecName: "metrics-tls") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "metrics-tls". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.526097 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "bound-sa-token". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.527206 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config" (OuterVolumeSpecName: "config") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.527213 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rczfb\" (UniqueName: \"kubernetes.io/projected/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-kube-api-access-rczfb\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.527492 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j" (OuterVolumeSpecName: "kube-api-access-w7l8j") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "kube-api-access-w7l8j". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.527795 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz" (OuterVolumeSpecName: "kube-api-access-8tdtz") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "kube-api-access-8tdtz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.527834 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client" (OuterVolumeSpecName: "etcd-client") pod "09efc573-dbb6-4249-bd59-9b87aba8dd28" (UID: "09efc573-dbb6-4249-bd59-9b87aba8dd28"). InnerVolumeSpecName "etcd-client". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.527561 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config" (OuterVolumeSpecName: "config") pod "7583ce53-e0fe-4a16-9e4d-50516596a136" (UID: "7583ce53-e0fe-4a16-9e4d-50516596a136"). InnerVolumeSpecName "config". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.529200 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.529845 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config" (OuterVolumeSpecName: "config") pod "01ab3dd5-8196-46d0-ad33-122e2ca51def" (UID: "01ab3dd5-8196-46d0-ad33-122e2ca51def"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.531539 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "a31745f5-9847-4afe-82a5-3161cc66ca93" (UID: "a31745f5-9847-4afe-82a5-3161cc66ca93"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.532437 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2kz5\" (UniqueName: \"kubernetes.io/projected/ef543e1b-8068-4ea3-b32a-61027b32e95d-kube-api-access-s2kz5\") pod \"network-node-identity-vrzqb\" (UID: \"ef543e1b-8068-4ea3-b32a-61027b32e95d\") " pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.535211 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="01ab3dd5-8196-46d0-ad33-122e2ca51def" path="/var/lib/kubelet/pods/01ab3dd5-8196-46d0-ad33-122e2ca51def/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.536134 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09ae3b1a-e8e7-4524-b54b-61eab6f9239a" path="/var/lib/kubelet/pods/09ae3b1a-e8e7-4524-b54b-61eab6f9239a/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.536380 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "57a731c4-ef35-47a8-b875-bfb08a7f8011" (UID: "57a731c4-ef35-47a8-b875-bfb08a7f8011"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.540635 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="09efc573-dbb6-4249-bd59-9b87aba8dd28" path="/var/lib/kubelet/pods/09efc573-dbb6-4249-bd59-9b87aba8dd28/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.541318 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.541716 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.543518 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b574797-001e-440a-8f4e-c0be86edad0f" path="/var/lib/kubelet/pods/0b574797-001e-440a-8f4e-c0be86edad0f/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.545448 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" (UID: "b11524ee-3fca-4b1b-9cdf-6da289fdbc7d"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.545720 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0b78653f-4ff9-4508-8672-245ed9b561e3" path="/var/lib/kubelet/pods/0b78653f-4ff9-4508-8672-245ed9b561e3/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.546341 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1386a44e-36a2-460c-96d0-0359d2b6f0f5" path="/var/lib/kubelet/pods/1386a44e-36a2-460c-96d0-0359d2b6f0f5/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.547224 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1bf7eb37-55a3-4c65-b768-a94c82151e69" path="/var/lib/kubelet/pods/1bf7eb37-55a3-4c65-b768-a94c82151e69/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.548532 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1d611f23-29be-4491-8495-bee1670e935f" path="/var/lib/kubelet/pods/1d611f23-29be-4491-8495-bee1670e935f/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.549485 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="20b0d48f-5fd6-431c-a545-e3c800c7b866" path="/var/lib/kubelet/pods/20b0d48f-5fd6-431c-a545-e3c800c7b866/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.550736 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c" path="/var/lib/kubelet/pods/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.551487 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="22c825df-677d-4ca6-82db-3454ed06e783" path="/var/lib/kubelet/pods/22c825df-677d-4ca6-82db-3454ed06e783/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.551615 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.552522 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "5225d0e4-402f-4861-b410-819f433b1803" (UID: "5225d0e4-402f-4861-b410-819f433b1803"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.553398 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="25e176fe-21b4-4974-b1ed-c8b94f112a7f" path="/var/lib/kubelet/pods/25e176fe-21b4-4974-b1ed-c8b94f112a7f/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.554636 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="308be0ea-9f5f-4b29-aeb1-5abd31a0b17b" path="/var/lib/kubelet/pods/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.555714 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="31d8b7a1-420e-4252-a5b7-eebe8a111292" path="/var/lib/kubelet/pods/31d8b7a1-420e-4252-a5b7-eebe8a111292/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.556275 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3ab1a177-2de0-46d9-b765-d0d0649bb42e" path="/var/lib/kubelet/pods/3ab1a177-2de0-46d9-b765-d0d0649bb42e/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.557270 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3cb93b32-e0ae-4377-b9c8-fdb9842c6d59" path="/var/lib/kubelet/pods/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.558935 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="43509403-f426-496e-be36-56cef71462f5" path="/var/lib/kubelet/pods/43509403-f426-496e-be36-56cef71462f5/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.558951 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.559805 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="44663579-783b-4372-86d6-acf235a62d72" path="/var/lib/kubelet/pods/44663579-783b-4372-86d6-acf235a62d72/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.560845 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="496e6271-fb68-4057-954e-a0d97a4afa3f" path="/var/lib/kubelet/pods/496e6271-fb68-4057-954e-a0d97a4afa3f/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.561537 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49c341d1-5089-4bc2-86a0-a5e165cfcc6b" path="/var/lib/kubelet/pods/49c341d1-5089-4bc2-86a0-a5e165cfcc6b/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.562008 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="49ef4625-1d3a-4a9f-b595-c2433d32326d" path="/var/lib/kubelet/pods/49ef4625-1d3a-4a9f-b595-c2433d32326d/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.563040 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="4bb40260-dbaa-4fb0-84df-5e680505d512" path="/var/lib/kubelet/pods/4bb40260-dbaa-4fb0-84df-5e680505d512/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.563484 4875 
kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5225d0e4-402f-4861-b410-819f433b1803" path="/var/lib/kubelet/pods/5225d0e4-402f-4861-b410-819f433b1803/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.564537 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5441d097-087c-4d9a-baa8-b210afa90fc9" path="/var/lib/kubelet/pods/5441d097-087c-4d9a-baa8-b210afa90fc9/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.564933 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="57a731c4-ef35-47a8-b875-bfb08a7f8011" path="/var/lib/kubelet/pods/57a731c4-ef35-47a8-b875-bfb08a7f8011/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.566069 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5b88f790-22fa-440e-b583-365168c0b23d" path="/var/lib/kubelet/pods/5b88f790-22fa-440e-b583-365168c0b23d/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.567029 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5fe579f8-e8a6-4643-bce5-a661393c4dde" path="/var/lib/kubelet/pods/5fe579f8-e8a6-4643-bce5-a661393c4dde/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.567512 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6402fda4-df10-493c-b4e5-d0569419652d" path="/var/lib/kubelet/pods/6402fda4-df10-493c-b4e5-d0569419652d/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.568647 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.568698 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6509e943-70c6-444c-bc41-48a544e36fbd" path="/var/lib/kubelet/pods/6509e943-70c6-444c-bc41-48a544e36fbd/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.569375 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6731426b-95fe-49ff-bb5f-40441049fde2" path="/var/lib/kubelet/pods/6731426b-95fe-49ff-bb5f-40441049fde2/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.570713 4875 kubelet_volumes.go:152] "Cleaned up orphaned volume subpath from pod" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volume-subpaths/run-systemd/ovnkube-controller/6" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.570822 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="6ea678ab-3438-413e-bfe3-290ae7725660" path="/var/lib/kubelet/pods/6ea678ab-3438-413e-bfe3-290ae7725660/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.572831 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7539238d-5fe0-46ed-884e-1c3b566537ec" path="/var/lib/kubelet/pods/7539238d-5fe0-46ed-884e-1c3b566537ec/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.575116 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7583ce53-e0fe-4a16-9e4d-50516596a136" 
path="/var/lib/kubelet/pods/7583ce53-e0fe-4a16-9e4d-50516596a136/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.576264 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="7bb08738-c794-4ee8-9972-3a62ca171029" path="/var/lib/kubelet/pods/7bb08738-c794-4ee8-9972-3a62ca171029/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.578253 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="87cf06ed-a83f-41a7-828d-70653580a8cb" path="/var/lib/kubelet/pods/87cf06ed-a83f-41a7-828d-70653580a8cb/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.579521 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8cea82b4-6893-4ddc-af9f-1bb5ae425c5b" path="/var/lib/kubelet/pods/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.581107 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hjshz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e5a9914-f410-4b51-8e16-77e18f7b09fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mm8dm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hjshz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.581481 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-etc-kube\" (UniqueName: 
\"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.581526 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mm8dm\" (UniqueName: \"kubernetes.io/projected/5e5a9914-f410-4b51-8e16-77e18f7b09fb-kube-api-access-mm8dm\") pod \"node-resolver-hjshz\" (UID: \"5e5a9914-f410-4b51-8e16-77e18f7b09fb\") " pod="openshift-dns/node-resolver-hjshz" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.581621 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.581646 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/5e5a9914-f410-4b51-8e16-77e18f7b09fb-hosts-file\") pod \"node-resolver-hjshz\" (UID: \"5e5a9914-f410-4b51-8e16-77e18f7b09fb\") " pod="openshift-dns/node-resolver-hjshz" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.581700 4875 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/8f668bae-612b-4b75-9490-919e737c6a3b-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.581710 4875 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.581719 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x7zkh\" (UniqueName: \"kubernetes.io/projected/6731426b-95fe-49ff-bb5f-40441049fde2-kube-api-access-x7zkh\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.581728 4875 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/6402fda4-df10-493c-b4e5-d0569419652d-images\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.581736 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s4n52\" (UniqueName: \"kubernetes.io/projected/925f1c65-6136-48ba-85aa-3a3b50560753-kube-api-access-s4n52\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.581747 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fqsjt\" (UniqueName: \"kubernetes.io/projected/efdd0498-1daa-4136-9a4a-3b948c2293fc-kube-api-access-fqsjt\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.581756 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6g6sz\" (UniqueName: \"kubernetes.io/projected/6509e943-70c6-444c-bc41-48a544e36fbd-kube-api-access-6g6sz\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.581766 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-4d4hj\" (UniqueName: 
\"kubernetes.io/projected/3ab1a177-2de0-46d9-b765-d0d0649bb42e-kube-api-access-4d4hj\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.581774 4875 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/5b88f790-22fa-440e-b583-365168c0b23d-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.581783 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zgdk5\" (UniqueName: \"kubernetes.io/projected/31d8b7a1-420e-4252-a5b7-eebe8a111292-kube-api-access-zgdk5\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.581791 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w9rds\" (UniqueName: \"kubernetes.io/projected/20b0d48f-5fd6-431c-a545-e3c800c7b866-kube-api-access-w9rds\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.581799 4875 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/bf126b07-da06-4140-9a57-dfd54fc6b486-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.581808 4875 reconciler_common.go:293] "Volume detached for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-oauth-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.581819 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pj782\" (UniqueName: \"kubernetes.io/projected/b6cd30de-2eeb-49a2-ab40-9167f4560ff5-kube-api-access-pj782\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.581828 4875 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1386a44e-36a2-460c-96d0-0359d2b6f0f5-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.581967 4875 reconciler_common.go:293] "Volume detached for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-cni-binary-copy\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.582021 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hosts-file\" (UniqueName: \"kubernetes.io/host-path/5e5a9914-f410-4b51-8e16-77e18f7b09fb-hosts-file\") pod \"node-resolver-hjshz\" (UID: \"5e5a9914-f410-4b51-8e16-77e18f7b09fb\") " pod="openshift-dns/node-resolver-hjshz" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.582063 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/d75a4c96-2883-4a0b-bab2-0fab2b6c0b49-host-slash\") pod \"iptables-alerter-4ln5h\" (UID: \"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\") " pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.582706 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-etc-kube\" (UniqueName: \"kubernetes.io/host-path/37a5e44f-9a88-4405-be8a-b645485e7312-host-etc-kube\") pod \"network-operator-58b4c7f79c-55gtf\" (UID: \"37a5e44f-9a88-4405-be8a-b645485e7312\") " pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.582728 4875 reconciler_common.go:293] "Volume detached for 
volume \"certs\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-certs\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.582765 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nzwt7\" (UniqueName: \"kubernetes.io/projected/96b93a3a-6083-4aea-8eab-fe1aa8245ad9-kube-api-access-nzwt7\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.582781 4875 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.582792 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/e7e6199b-1264-4501-8953-767f51328d08-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.582804 4875 reconciler_common.go:293] "Volume detached for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-stats-auth\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.582836 4875 reconciler_common.go:293] "Volume detached for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.582848 4875 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.582860 4875 reconciler_common.go:293] "Volume detached for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-service-ca\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.582871 4875 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.582882 4875 reconciler_common.go:293] "Volume detached for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.582916 4875 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.582928 4875 reconciler_common.go:293] "Volume detached for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-apiservice-cert\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.582939 4875 reconciler_common.go:293] "Volume detached for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/fda69060-fa79-4696-b1a6-7980f124bf7c-mcd-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.582951 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lz9wn\" (UniqueName: 
\"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-kube-api-access-lz9wn\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.582962 4875 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/87cf06ed-a83f-41a7-828d-70653580a8cb-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.582994 4875 reconciler_common.go:293] "Volume detached for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-audit\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.583006 4875 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.583018 4875 reconciler_common.go:293] "Volume detached for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-available-featuregates\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.583029 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wxkg8\" (UniqueName: \"kubernetes.io/projected/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-kube-api-access-wxkg8\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.583041 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xcphl\" (UniqueName: \"kubernetes.io/projected/7583ce53-e0fe-4a16-9e4d-50516596a136-kube-api-access-xcphl\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.583072 4875 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-service-ca\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.583084 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qs4fp\" (UniqueName: \"kubernetes.io/projected/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-kube-api-access-qs4fp\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.583095 4875 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.583106 4875 reconciler_common.go:293] "Volume detached for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-cabundle\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.583117 4875 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/496e6271-fb68-4057-954e-a0d97a4afa3f-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.583127 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0b78653f-4ff9-4508-8672-245ed9b561e3-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.583159 4875 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: 
\"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.583171 4875 reconciler_common.go:293] "Volume detached for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/3ab1a177-2de0-46d9-b765-d0d0649bb42e-package-server-manager-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.583184 4875 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.583196 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1386a44e-36a2-460c-96d0-0359d2b6f0f5-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.583207 4875 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/6509e943-70c6-444c-bc41-48a544e36fbd-config\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.583237 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w4xd4\" (UniqueName: \"kubernetes.io/projected/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-kube-api-access-w4xd4\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.583250 4875 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/9d4552c7-cd75-42dd-8880-30dd377c49a4-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.583259 4875 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.583269 4875 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.583279 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tk88c\" (UniqueName: \"kubernetes.io/projected/7539238d-5fe0-46ed-884e-1c3b566537ec-kube-api-access-tk88c\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.583290 4875 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.583322 4875 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.583333 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-qg5z5\" (UniqueName: \"kubernetes.io/projected/43509403-f426-496e-be36-56cef71462f5-kube-api-access-qg5z5\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.583344 4875 reconciler_common.go:293] "Volume detached for volume \"console-config\" 
(UniqueName: \"kubernetes.io/configmap/43509403-f426-496e-be36-56cef71462f5-console-config\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.583354 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mnrrd\" (UniqueName: \"kubernetes.io/projected/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-kube-api-access-mnrrd\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.583365 4875 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/22c825df-677d-4ca6-82db-3454ed06e783-config\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.583394 4875 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.583404 4875 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1d611f23-29be-4491-8495-bee1670e935f-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.583446 4875 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.583458 4875 reconciler_common.go:293] "Volume detached for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5fe579f8-e8a6-4643-bce5-a661393c4dde-node-bootstrap-token\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.583468 4875 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0b78653f-4ff9-4508-8672-245ed9b561e3-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.583477 4875 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.583487 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dbsvg\" (UniqueName: \"kubernetes.io/projected/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9-kube-api-access-dbsvg\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.583516 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-279lb\" (UniqueName: \"kubernetes.io/projected/7bb08738-c794-4ee8-9972-3a62ca171029-kube-api-access-279lb\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.583527 4875 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/87cf06ed-a83f-41a7-828d-70653580a8cb-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.583539 4875 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.583549 4875 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/7583ce53-e0fe-4a16-9e4d-50516596a136-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.583562 4875 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/5441d097-087c-4d9a-baa8-b210afa90fc9-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.583571 4875 reconciler_common.go:293] "Volume detached for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/25e176fe-21b4-4974-b1ed-c8b94f112a7f-signing-key\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.583622 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-2d4wz\" (UniqueName: \"kubernetes.io/projected/5441d097-087c-4d9a-baa8-b210afa90fc9-kube-api-access-2d4wz\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.583634 4875 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.583644 4875 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.583655 4875 reconciler_common.go:293] "Volume detached for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/bf126b07-da06-4140-9a57-dfd54fc6b486-image-registry-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.583666 4875 reconciler_common.go:293] "Volume detached for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/7bb08738-c794-4ee8-9972-3a62ca171029-cni-sysctl-allowlist\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.587669 4875 reconciler_common.go:293] "Volume detached for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/c03ee662-fb2f-4fc4-a2c1-af487c19d254-service-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.587762 4875 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-config\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.587827 4875 reconciler_common.go:293] "Volume detached for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/b6312bbd-5731-4ea0-a20f-81d5a57df44a-srv-cert\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.588105 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="925f1c65-6136-48ba-85aa-3a3b50560753" path="/var/lib/kubelet/pods/925f1c65-6136-48ba-85aa-3a3b50560753/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.588804 4875 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589012 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="96b93a3a-6083-4aea-8eab-fe1aa8245ad9" path="/var/lib/kubelet/pods/96b93a3a-6083-4aea-8eab-fe1aa8245ad9/volumes" Nov 
26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589287 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6ccd8\" (UniqueName: \"kubernetes.io/projected/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-kube-api-access-6ccd8\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589319 4875 reconciler_common.go:293] "Volume detached for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-etcd-ca\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589329 4875 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/09efc573-dbb6-4249-bd59-9b87aba8dd28-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589338 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/496e6271-fb68-4057-954e-a0d97a4afa3f-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589347 4875 reconciler_common.go:293] "Volume detached for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/43509403-f426-496e-be36-56cef71462f5-console-oauth-config\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589356 4875 reconciler_common.go:293] "Volume detached for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/308be0ea-9f5f-4b29-aeb1-5abd31a0b17b-webhook-cert\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589377 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d4lsv\" (UniqueName: \"kubernetes.io/projected/25e176fe-21b4-4974-b1ed-c8b94f112a7f-kube-api-access-d4lsv\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589385 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-zkvpv\" (UniqueName: \"kubernetes.io/projected/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-kube-api-access-zkvpv\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589394 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8tdtz\" (UniqueName: \"kubernetes.io/projected/09efc573-dbb6-4249-bd59-9b87aba8dd28-kube-api-access-8tdtz\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589404 4875 reconciler_common.go:293] "Volume detached for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-encryption-config\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589426 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mg5zb\" (UniqueName: \"kubernetes.io/projected/6402fda4-df10-493c-b4e5-d0569419652d-kube-api-access-mg5zb\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589439 4875 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589451 4875 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc 
kubenswrapper[4875]: I1126 00:07:27.589462 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gf66m\" (UniqueName: \"kubernetes.io/projected/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-kube-api-access-gf66m\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589470 4875 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/8f668bae-612b-4b75-9490-919e737c6a3b-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589486 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jkwtn\" (UniqueName: \"kubernetes.io/projected/5b88f790-22fa-440e-b583-365168c0b23d-kube-api-access-jkwtn\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589495 4875 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589507 4875 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/8f668bae-612b-4b75-9490-919e737c6a3b-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589520 4875 reconciler_common.go:293] "Volume detached for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/09ae3b1a-e8e7-4524-b54b-61eab6f9239a-etcd-serving-ca\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589531 4875 reconciler_common.go:293] "Volume detached for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/925f1c65-6136-48ba-85aa-3a3b50560753-ovn-control-plane-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589543 4875 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/a31745f5-9847-4afe-82a5-3161cc66ca93-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589552 4875 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589561 4875 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8cea82b4-6893-4ddc-af9f-1bb5ae425c5b-config\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589570 4875 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/57a731c4-ef35-47a8-b875-bfb08a7f8011-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589585 4875 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/09efc573-dbb6-4249-bd59-9b87aba8dd28-config\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589594 4875 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-client-ca\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 
crc kubenswrapper[4875]: I1126 00:07:27.589602 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w7l8j\" (UniqueName: \"kubernetes.io/projected/01ab3dd5-8196-46d0-ad33-122e2ca51def-kube-api-access-w7l8j\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589610 4875 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589618 4875 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/7583ce53-e0fe-4a16-9e4d-50516596a136-client-ca\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589626 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-x2m85\" (UniqueName: \"kubernetes.io/projected/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d-kube-api-access-x2m85\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589634 4875 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/e7e6199b-1264-4501-8953-767f51328d08-config\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589643 4875 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/49c341d1-5089-4bc2-86a0-a5e165cfcc6b-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589650 4875 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0b574797-001e-440a-8f4e-c0be86edad0f-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589660 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rnphk\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-kube-api-access-rnphk\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589668 4875 reconciler_common.go:293] "Volume detached for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/a31745f5-9847-4afe-82a5-3161cc66ca93-metrics-tls\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589689 4875 reconciler_common.go:293] "Volume detached for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-default-certificate\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589697 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-sb6h7\" (UniqueName: \"kubernetes.io/projected/1bf7eb37-55a3-4c65-b768-a94c82151e69-kube-api-access-sb6h7\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589705 4875 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1bf7eb37-55a3-4c65-b768-a94c82151e69-config\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589715 4875 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/5441d097-087c-4d9a-baa8-b210afa90fc9-config\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589723 4875 
reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/bf126b07-da06-4140-9a57-dfd54fc6b486-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589731 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-v47cf\" (UniqueName: \"kubernetes.io/projected/c03ee662-fb2f-4fc4-a2c1-af487c19d254-kube-api-access-v47cf\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589739 4875 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/01ab3dd5-8196-46d0-ad33-122e2ca51def-config\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589747 4875 reconciler_common.go:293] "Volume detached for volume \"images\" (UniqueName: \"kubernetes.io/configmap/31d8b7a1-420e-4252-a5b7-eebe8a111292-images\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589755 4875 reconciler_common.go:293] "Volume detached for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0b574797-001e-440a-8f4e-c0be86edad0f-mcc-auth-proxy-config\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589764 4875 reconciler_common.go:293] "Volume detached for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/1bf7eb37-55a3-4c65-b768-a94c82151e69-etcd-client\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589772 4875 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589780 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-249nr\" (UniqueName: \"kubernetes.io/projected/b6312bbd-5731-4ea0-a20f-81d5a57df44a-kube-api-access-249nr\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589788 4875 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/9d4552c7-cd75-42dd-8880-30dd377c49a4-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589796 4875 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/7539238d-5fe0-46ed-884e-1c3b566537ec-config\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589804 4875 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/925f1c65-6136-48ba-85aa-3a3b50560753-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589812 4875 reconciler_common.go:293] "Volume detached for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/efdd0498-1daa-4136-9a4a-3b948c2293fc-webhook-certs\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589820 4875 reconciler_common.go:293] "Volume detached for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/4bb40260-dbaa-4fb0-84df-5e680505d512-multus-daemon-config\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589828 4875 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/5225d0e4-402f-4861-b410-819f433b1803-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589837 4875 reconciler_common.go:293] "Volume detached for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/c03ee662-fb2f-4fc4-a2c1-af487c19d254-metrics-certs\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589845 4875 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/3cb93b32-e0ae-4377-b9c8-fdb9842c6d59-serviceca\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589853 4875 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/210d8245-ebfc-4e3b-ac4a-e21ce76f9a7c-config\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589861 4875 reconciler_common.go:293] "Volume detached for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0b78653f-4ff9-4508-8672-245ed9b561e3-service-ca\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589869 4875 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/6509e943-70c6-444c-bc41-48a544e36fbd-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589879 4875 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/6ea678ab-3438-413e-bfe3-290ae7725660-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589888 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bf2bz\" (UniqueName: \"kubernetes.io/projected/1d611f23-29be-4491-8495-bee1670e935f-kube-api-access-bf2bz\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589896 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jhbk2\" (UniqueName: \"kubernetes.io/projected/bd23aa5c-e532-4e53-bccf-e79f130c5ae8-kube-api-access-jhbk2\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589904 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-kfwg7\" (UniqueName: \"kubernetes.io/projected/8f668bae-612b-4b75-9490-919e737c6a3b-kube-api-access-kfwg7\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589912 4875 reconciler_common.go:293] "Volume detached for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/a0128f3a-b052-44ed-a84e-c4c8aaf17c13-samples-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589921 4875 reconciler_common.go:293] "Volume detached for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/6731426b-95fe-49ff-bb5f-40441049fde2-control-plane-machine-set-operator-tls\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589929 4875 reconciler_common.go:293] "Volume detached for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/fda69060-fa79-4696-b1a6-7980f124bf7c-proxy-tls\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589938 4875 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: 
\"kubernetes.io/projected/a31745f5-9847-4afe-82a5-3161cc66ca93-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.589946 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cfbct\" (UniqueName: \"kubernetes.io/projected/57a731c4-ef35-47a8-b875-bfb08a7f8011-kube-api-access-cfbct\") on node \"crc\" DevicePath \"\"" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.591437 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.591542 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9d4552c7-cd75-42dd-8880-30dd377c49a4" path="/var/lib/kubelet/pods/9d4552c7-cd75-42dd-8880-30dd377c49a4/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.592470 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a0128f3a-b052-44ed-a84e-c4c8aaf17c13" path="/var/lib/kubelet/pods/a0128f3a-b052-44ed-a84e-c4c8aaf17c13/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.593627 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a31745f5-9847-4afe-82a5-3161cc66ca93" path="/var/lib/kubelet/pods/a31745f5-9847-4afe-82a5-3161cc66ca93/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.595047 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b11524ee-3fca-4b1b-9cdf-6da289fdbc7d" path="/var/lib/kubelet/pods/b11524ee-3fca-4b1b-9cdf-6da289fdbc7d/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.595722 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6312bbd-5731-4ea0-a20f-81d5a57df44a" path="/var/lib/kubelet/pods/b6312bbd-5731-4ea0-a20f-81d5a57df44a/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.596700 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b6cd30de-2eeb-49a2-ab40-9167f4560ff5" path="/var/lib/kubelet/pods/b6cd30de-2eeb-49a2-ab40-9167f4560ff5/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.597349 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bc5039c0-ea34-426b-a2b7-fbbc87b49a6d" path="/var/lib/kubelet/pods/bc5039c0-ea34-426b-a2b7-fbbc87b49a6d/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.598248 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bd23aa5c-e532-4e53-bccf-e79f130c5ae8" path="/var/lib/kubelet/pods/bd23aa5c-e532-4e53-bccf-e79f130c5ae8/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.599168 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bf126b07-da06-4140-9a57-dfd54fc6b486" path="/var/lib/kubelet/pods/bf126b07-da06-4140-9a57-dfd54fc6b486/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.600342 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="c03ee662-fb2f-4fc4-a2c1-af487c19d254" path="/var/lib/kubelet/pods/c03ee662-fb2f-4fc4-a2c1-af487c19d254/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.600786 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d" 
path="/var/lib/kubelet/pods/cd70aa09-68dd-4d64-bd6f-156fe6d1dc6d/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.601225 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e7e6199b-1264-4501-8953-767f51328d08" path="/var/lib/kubelet/pods/e7e6199b-1264-4501-8953-767f51328d08/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.602125 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="efdd0498-1daa-4136-9a4a-3b948c2293fc" path="/var/lib/kubelet/pods/efdd0498-1daa-4136-9a4a-3b948c2293fc/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.602899 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mm8dm\" (UniqueName: \"kubernetes.io/projected/5e5a9914-f410-4b51-8e16-77e18f7b09fb-kube-api-access-mm8dm\") pod \"node-resolver-hjshz\" (UID: \"5e5a9914-f410-4b51-8e16-77e18f7b09fb\") " pod="openshift-dns/node-resolver-hjshz" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.603743 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f88749ec-7931-4ee7-b3fc-1ec5e11f92e9" path="/var/lib/kubelet/pods/f88749ec-7931-4ee7-b3fc-1ec5e11f92e9/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.604782 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.604745 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fda69060-fa79-4696-b1a6-7980f124bf7c" path="/var/lib/kubelet/pods/fda69060-fa79-4696-b1a6-7980f124bf7c/volumes" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.615704 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.625853 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.632224 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hjshz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e5a9914-f410-4b51-8e16-77e18f7b09fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mm8dm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hjshz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: 
connect: connection refused" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.642522 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.651247 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.652474 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.652839 4875 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738" exitCode=255 Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.652873 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerDied","Data":"b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738"} Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.663261 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located 
when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.664439 4875 scope.go:117] "RemoveContainer" containerID="b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.664707 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.676196 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.687580 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.697514 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.704210 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hjshz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e5a9914-f410-4b51-8e16-77e18f7b09fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mm8dm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hjshz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: 
connect: connection refused" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.711863 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.719860 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.757642 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.764779 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-operator/iptables-alerter-4ln5h" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.771922 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-node-identity/network-node-identity-vrzqb" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.787581 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-dns/node-resolver-hjshz" Nov 26 00:07:27 crc kubenswrapper[4875]: W1126 00:07:27.788349 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod37a5e44f_9a88_4405_be8a_b645485e7312.slice/crio-f3cb44b8de1c44aaca6d3e215c1c820be76ea821eb8c3ebf94d2ad5ec9935f6d WatchSource:0}: Error finding container f3cb44b8de1c44aaca6d3e215c1c820be76ea821eb8c3ebf94d2ad5ec9935f6d: Status 404 returned error can't find the container with id f3cb44b8de1c44aaca6d3e215c1c820be76ea821eb8c3ebf94d2ad5ec9935f6d Nov 26 00:07:27 crc kubenswrapper[4875]: W1126 00:07:27.789677 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd75a4c96_2883_4a0b_bab2_0fab2b6c0b49.slice/crio-e937f6eec994cc30ea222240bdcc8d7facc4ae7880f79608903ec24e8644fb70 WatchSource:0}: Error finding container e937f6eec994cc30ea222240bdcc8d7facc4ae7880f79608903ec24e8644fb70: Status 404 returned error can't find the container with id e937f6eec994cc30ea222240bdcc8d7facc4ae7880f79608903ec24e8644fb70 Nov 26 00:07:27 crc kubenswrapper[4875]: W1126 00:07:27.791599 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podef543e1b_8068_4ea3_b32a_61027b32e95d.slice/crio-baec9b292e890afaf857f58c2d779caa0211f59a7a1b3af2d5456646741dc642 WatchSource:0}: Error finding container baec9b292e890afaf857f58c2d779caa0211f59a7a1b3af2d5456646741dc642: Status 404 returned error can't find the container with id baec9b292e890afaf857f58c2d779caa0211f59a7a1b3af2d5456646741dc642 Nov 26 00:07:27 crc kubenswrapper[4875]: W1126 00:07:27.801312 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod5e5a9914_f410_4b51_8e16_77e18f7b09fb.slice/crio-702c5cfd1aaf1f53518898c681d2e7b3f86447def812bc0fdeaee0e579c886b1 WatchSource:0}: Error finding container 702c5cfd1aaf1f53518898c681d2e7b3f86447def812bc0fdeaee0e579c886b1: Status 404 returned error can't find the container with id 702c5cfd1aaf1f53518898c681d2e7b3f86447def812bc0fdeaee0e579c886b1 Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.945243 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.948711 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.957471 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook 
approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.965667 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hjshz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e5a9914-f410-4b51-8e16-77e18f7b09fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mm8dm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hjshz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.977893 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c8a175-7cc7-4e45-8e37-d431994e5526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26
T00:07:26Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1126 00:07:11.241353 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 00:07:11.243535 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-775517230/tls.crt::/tmp/serving-cert-775517230/tls.key\\\\\\\"\\\\nI1126 00:07:26.715442 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 00:07:26.718387 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 00:07:26.718433 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 00:07:26.718455 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 00:07:26.718462 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 00:07:26.727473 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 00:07:26.727505 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727510 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727513 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 00:07:26.727516 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 00:07:26.727520 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 00:07:26.727524 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 00:07:26.727732 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 00:07:26.730962 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.983280 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-daemon-9mztb"] Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.983652 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.985391 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.986178 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.988657 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.988679 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.989455 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.991824 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:07:27 crc kubenswrapper[4875]: E1126 00:07:27.991996 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:07:28.991978921 +0000 UTC m=+22.181954616 (durationBeforeRetry 1s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:07:27 crc kubenswrapper[4875]: I1126 00:07:27.996175 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.008780 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.018953 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.023666 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/kube-controller-manager-crc"] Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.033533 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.042757 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.050642 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.059939 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.068662 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.077650 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.085164 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hjshz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e5a9914-f410-4b51-8e16-77e18f7b09fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mm8dm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hjshz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: 
connect: connection refused" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.092554 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0d4fb5a7-3ade-467a-9640-d744b1ef0c8e-proxy-tls\") pod \"machine-config-daemon-9mztb\" (UID: \"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\") " pod="openshift-machine-config-operator/machine-config-daemon-9mztb" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.092598 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/0d4fb5a7-3ade-467a-9640-d744b1ef0c8e-rootfs\") pod \"machine-config-daemon-9mztb\" (UID: \"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\") " pod="openshift-machine-config-operator/machine-config-daemon-9mztb" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.092617 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0d4fb5a7-3ade-467a-9640-d744b1ef0c8e-mcd-auth-proxy-config\") pod \"machine-config-daemon-9mztb\" (UID: \"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\") " pod="openshift-machine-config-operator/machine-config-daemon-9mztb" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.092642 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:07:28 crc kubenswrapper[4875]: E1126 00:07:28.092783 4875 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 00:07:28 crc kubenswrapper[4875]: E1126 00:07:28.092801 4875 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 00:07:28 crc kubenswrapper[4875]: E1126 00:07:28.092812 4875 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.092822 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:07:28 crc kubenswrapper[4875]: E1126 00:07:28.092928 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 00:07:29.092866806 +0000 UTC m=+22.282842581 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 00:07:28 crc kubenswrapper[4875]: E1126 00:07:28.092944 4875 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 00:07:28 crc kubenswrapper[4875]: E1126 00:07:28.093314 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 00:07:29.093301067 +0000 UTC m=+22.283276762 (durationBeforeRetry 1s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 00:07:28 crc kubenswrapper[4875]: E1126 00:07:28.093070 4875 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 00:07:28 crc kubenswrapper[4875]: E1126 00:07:28.093397 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 00:07:29.09339168 +0000 UTC m=+22.283367375 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.092969 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.093457 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jt2ft\" (UniqueName: \"kubernetes.io/projected/0d4fb5a7-3ade-467a-9640-d744b1ef0c8e-kube-api-access-jt2ft\") pod \"machine-config-daemon-9mztb\" (UID: \"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\") " pod="openshift-machine-config-operator/machine-config-daemon-9mztb" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.093483 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:07:28 crc kubenswrapper[4875]: E1126 00:07:28.093600 4875 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 00:07:28 crc kubenswrapper[4875]: E1126 00:07:28.093619 4875 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 00:07:28 crc kubenswrapper[4875]: E1126 00:07:28.093630 4875 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 00:07:28 crc kubenswrapper[4875]: E1126 00:07:28.093676 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 00:07:29.093665046 +0000 UTC m=+22.283640771 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.096160 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c8a175-7cc7-4e45-8e37-d431994e5526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0\\\",\\\"image\\\":\\\"quay.io/crcont/o
penshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T00:07:26Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1126 00:07:11.241353 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 00:07:11.243535 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-775517230/tls.crt::/tmp/serving-cert-775517230/tls.key\\\\\\\"\\\\nI1126 00:07:26.715442 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 00:07:26.718387 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 00:07:26.718433 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 00:07:26.718455 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 00:07:26.718462 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 00:07:26.727473 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 00:07:26.727505 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727510 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727513 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 00:07:26.727516 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 00:07:26.727520 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 00:07:26.727524 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 00:07:26.727732 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 00:07:26.730962 1 
cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.106808 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.122140 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.136573 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9mztb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.151493 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6e20b72-5757-4560-ab3e-61ad3a451be8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b34767b95b984666c42ed51106c7d43766e36ce3f5f4e743d8a40451536f82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dc301396f467d1c7d0af549fa7bf7daf1542a2aa87bf3ece820ce8e21a933d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9641d3d437fd7125bbf21d140d2e6853aed997e53f3f6be8133ce94c0d4a4653\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5086422d5107d7bba0da4b6f9b3e75966b3ee5833d93e8f5baa87e59f99e4578\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.193873 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/0d4fb5a7-3ade-467a-9640-d744b1ef0c8e-rootfs\") pod \"machine-config-daemon-9mztb\" (UID: \"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\") " pod="openshift-machine-config-operator/machine-config-daemon-9mztb" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.194186 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0d4fb5a7-3ade-467a-9640-d744b1ef0c8e-mcd-auth-proxy-config\") pod \"machine-config-daemon-9mztb\" (UID: \"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\") " pod="openshift-machine-config-operator/machine-config-daemon-9mztb" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.194342 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jt2ft\" (UniqueName: \"kubernetes.io/projected/0d4fb5a7-3ade-467a-9640-d744b1ef0c8e-kube-api-access-jt2ft\") pod \"machine-config-daemon-9mztb\" (UID: \"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\") " pod="openshift-machine-config-operator/machine-config-daemon-9mztb" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.194479 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0d4fb5a7-3ade-467a-9640-d744b1ef0c8e-proxy-tls\") pod \"machine-config-daemon-9mztb\" (UID: \"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\") " pod="openshift-machine-config-operator/machine-config-daemon-9mztb" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.194081 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"rootfs\" (UniqueName: \"kubernetes.io/host-path/0d4fb5a7-3ade-467a-9640-d744b1ef0c8e-rootfs\") pod \"machine-config-daemon-9mztb\" (UID: \"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\") " pod="openshift-machine-config-operator/machine-config-daemon-9mztb" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.195947 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcd-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/0d4fb5a7-3ade-467a-9640-d744b1ef0c8e-mcd-auth-proxy-config\") 
pod \"machine-config-daemon-9mztb\" (UID: \"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\") " pod="openshift-machine-config-operator/machine-config-daemon-9mztb" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.200884 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/0d4fb5a7-3ade-467a-9640-d744b1ef0c8e-proxy-tls\") pod \"machine-config-daemon-9mztb\" (UID: \"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\") " pod="openshift-machine-config-operator/machine-config-daemon-9mztb" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.216311 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jt2ft\" (UniqueName: \"kubernetes.io/projected/0d4fb5a7-3ade-467a-9640-d744b1ef0c8e-kube-api-access-jt2ft\") pod \"machine-config-daemon-9mztb\" (UID: \"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\") " pod="openshift-machine-config-operator/machine-config-daemon-9mztb" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.302741 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" Nov 26 00:07:28 crc kubenswrapper[4875]: W1126 00:07:28.313548 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d4fb5a7_3ade_467a_9640_d744b1ef0c8e.slice/crio-7b9bd35b782db862c0d10d45af6db91f7271a261a48ece6f2c6ff4ebad25cb65 WatchSource:0}: Error finding container 7b9bd35b782db862c0d10d45af6db91f7271a261a48ece6f2c6ff4ebad25cb65: Status 404 returned error can't find the container with id 7b9bd35b782db862c0d10d45af6db91f7271a261a48ece6f2c6ff4ebad25cb65 Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.351407 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-additional-cni-plugins-qvrsq"] Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.352347 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.353181 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-8wvqp"] Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.353593 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.355188 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.355255 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.355202 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.355547 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.355678 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.355785 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.355901 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.362983 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9mztb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.374714 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02732a7d-257e-4d74-8a08-cea35074fc4e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceac
count\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qvrsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.392654 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c8a175-7cc7-4e45-8e37-d431994e5526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T00:07:26Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1126 00:07:11.241353 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 00:07:11.243535 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-775517230/tls.crt::/tmp/serving-cert-775517230/tls.key\\\\\\\"\\\\nI1126 00:07:26.715442 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 00:07:26.718387 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 00:07:26.718433 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 00:07:26.718455 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 00:07:26.718462 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 00:07:26.727473 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 00:07:26.727505 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727510 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727513 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 00:07:26.727516 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 00:07:26.727520 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 00:07:26.727524 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 00:07:26.727732 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 00:07:26.730962 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.396887 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6x47n\" (UniqueName: \"kubernetes.io/projected/02732a7d-257e-4d74-8a08-cea35074fc4e-kube-api-access-6x47n\") pod \"multus-additional-cni-plugins-qvrsq\" (UID: \"02732a7d-257e-4d74-8a08-cea35074fc4e\") " pod="openshift-multus/multus-additional-cni-plugins-qvrsq" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.396933 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-host-var-lib-cni-multus\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.396954 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-22mhs\" (UniqueName: \"kubernetes.io/projected/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-kube-api-access-22mhs\") pod 
\"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.396974 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/02732a7d-257e-4d74-8a08-cea35074fc4e-tuning-conf-dir\") pod \"multus-additional-cni-plugins-qvrsq\" (UID: \"02732a7d-257e-4d74-8a08-cea35074fc4e\") " pod="openshift-multus/multus-additional-cni-plugins-qvrsq" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.396993 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-host-run-netns\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.397014 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/02732a7d-257e-4d74-8a08-cea35074fc4e-cni-binary-copy\") pod \"multus-additional-cni-plugins-qvrsq\" (UID: \"02732a7d-257e-4d74-8a08-cea35074fc4e\") " pod="openshift-multus/multus-additional-cni-plugins-qvrsq" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.397031 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-multus-socket-dir-parent\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.397049 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-host-run-multus-certs\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.397069 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-etc-kubernetes\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.397087 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/02732a7d-257e-4d74-8a08-cea35074fc4e-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-qvrsq\" (UID: \"02732a7d-257e-4d74-8a08-cea35074fc4e\") " pod="openshift-multus/multus-additional-cni-plugins-qvrsq" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.397109 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-cnibin\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.397129 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-bin\" (UniqueName: 
\"kubernetes.io/host-path/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-host-var-lib-cni-bin\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.397145 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-multus-conf-dir\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.397262 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-multus-cni-dir\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.397303 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/02732a7d-257e-4d74-8a08-cea35074fc4e-system-cni-dir\") pod \"multus-additional-cni-plugins-qvrsq\" (UID: \"02732a7d-257e-4d74-8a08-cea35074fc4e\") " pod="openshift-multus/multus-additional-cni-plugins-qvrsq" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.397340 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-host-var-lib-kubelet\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.397475 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-system-cni-dir\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.397594 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-host-run-k8s-cni-cncf-io\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.397638 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-os-release\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.397664 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/02732a7d-257e-4d74-8a08-cea35074fc4e-os-release\") pod \"multus-additional-cni-plugins-qvrsq\" (UID: \"02732a7d-257e-4d74-8a08-cea35074fc4e\") " pod="openshift-multus/multus-additional-cni-plugins-qvrsq" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.397807 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cni-binary-copy\" 
(UniqueName: \"kubernetes.io/configmap/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-cni-binary-copy\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.397849 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-hostroot\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.397881 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-multus-daemon-config\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.397911 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/02732a7d-257e-4d74-8a08-cea35074fc4e-cnibin\") pod \"multus-additional-cni-plugins-qvrsq\" (UID: \"02732a7d-257e-4d74-8a08-cea35074fc4e\") " pod="openshift-multus/multus-additional-cni-plugins-qvrsq" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.407903 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.425438 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6e20b72-5757-4560-ab3e-61ad3a451be8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b34767b95b984666c42ed51106c7d43766e36ce3f5f4e743d8a40451536f82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dc301396f467d1c7d0af549fa7bf7daf1542a2aa87bf3ece820ce8e21a933d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cer
t-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9641d3d437fd7125bbf21d140d2e6853aed997e53f3f6be8133ce94c0d4a4653\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5086422d5107d7bba0da4b6f9b3e75966b3ee5833d93e8f5baa87e59f99e4578\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.435732 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.444050 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.453194 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.465325 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.475334 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.487990 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hjshz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e5a9914-f410-4b51-8e16-77e18f7b09fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mm8dm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hjshz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: 
connect: connection refused" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.498745 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/02732a7d-257e-4d74-8a08-cea35074fc4e-os-release\") pod \"multus-additional-cni-plugins-qvrsq\" (UID: \"02732a7d-257e-4d74-8a08-cea35074fc4e\") " pod="openshift-multus/multus-additional-cni-plugins-qvrsq" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.498964 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-cni-binary-copy\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.499006 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/02732a7d-257e-4d74-8a08-cea35074fc4e-os-release\") pod \"multus-additional-cni-plugins-qvrsq\" (UID: \"02732a7d-257e-4d74-8a08-cea35074fc4e\") " pod="openshift-multus/multus-additional-cni-plugins-qvrsq" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.499081 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-hostroot\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.499160 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-multus-daemon-config\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.499182 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/02732a7d-257e-4d74-8a08-cea35074fc4e-cnibin\") pod \"multus-additional-cni-plugins-qvrsq\" (UID: \"02732a7d-257e-4d74-8a08-cea35074fc4e\") " pod="openshift-multus/multus-additional-cni-plugins-qvrsq" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.499207 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-22mhs\" (UniqueName: \"kubernetes.io/projected/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-kube-api-access-22mhs\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.499227 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6x47n\" (UniqueName: \"kubernetes.io/projected/02732a7d-257e-4d74-8a08-cea35074fc4e-kube-api-access-6x47n\") pod \"multus-additional-cni-plugins-qvrsq\" (UID: \"02732a7d-257e-4d74-8a08-cea35074fc4e\") " pod="openshift-multus/multus-additional-cni-plugins-qvrsq" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.499248 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-host-var-lib-cni-multus\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 
00:07:28.499268 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/02732a7d-257e-4d74-8a08-cea35074fc4e-tuning-conf-dir\") pod \"multus-additional-cni-plugins-qvrsq\" (UID: \"02732a7d-257e-4d74-8a08-cea35074fc4e\") " pod="openshift-multus/multus-additional-cni-plugins-qvrsq" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.499340 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-host-run-netns\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.499359 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-host-run-multus-certs\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.499380 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/02732a7d-257e-4d74-8a08-cea35074fc4e-cni-binary-copy\") pod \"multus-additional-cni-plugins-qvrsq\" (UID: \"02732a7d-257e-4d74-8a08-cea35074fc4e\") " pod="openshift-multus/multus-additional-cni-plugins-qvrsq" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.499399 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-multus-socket-dir-parent\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.499433 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-cnibin\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.499453 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-etc-kubernetes\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.499470 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/02732a7d-257e-4d74-8a08-cea35074fc4e-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-qvrsq\" (UID: \"02732a7d-257e-4d74-8a08-cea35074fc4e\") " pod="openshift-multus/multus-additional-cni-plugins-qvrsq" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.499490 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-multus-conf-dir\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.499509 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-host-var-lib-cni-bin\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.499531 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-multus-cni-dir\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.499508 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-multus\" (UniqueName: \"kubernetes.io/host-path/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-host-var-lib-cni-multus\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.499577 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-host-var-lib-kubelet\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.499571 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-host-run-netns\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.499529 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-multus-certs\" (UniqueName: \"kubernetes.io/host-path/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-host-run-multus-certs\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.499552 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-kubelet\" (UniqueName: \"kubernetes.io/host-path/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-host-var-lib-kubelet\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.499615 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-conf-dir\" (UniqueName: \"kubernetes.io/host-path/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-multus-conf-dir\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.499559 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-socket-dir-parent\" (UniqueName: \"kubernetes.io/host-path/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-multus-socket-dir-parent\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.499649 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/02732a7d-257e-4d74-8a08-cea35074fc4e-system-cni-dir\") pod \"multus-additional-cni-plugins-qvrsq\" (UID: \"02732a7d-257e-4d74-8a08-cea35074fc4e\") " 
pod="openshift-multus/multus-additional-cni-plugins-qvrsq" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.499687 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-bin\" (UniqueName: \"kubernetes.io/host-path/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-host-var-lib-cni-bin\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.499673 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-cnibin\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.499710 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-kubernetes\" (UniqueName: \"kubernetes.io/host-path/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-etc-kubernetes\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.499762 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/02732a7d-257e-4d74-8a08-cea35074fc4e-system-cni-dir\") pod \"multus-additional-cni-plugins-qvrsq\" (UID: \"02732a7d-257e-4d74-8a08-cea35074fc4e\") " pod="openshift-multus/multus-additional-cni-plugins-qvrsq" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.499785 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-cni-dir\" (UniqueName: \"kubernetes.io/host-path/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-multus-cni-dir\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.499782 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"system-cni-dir\" (UniqueName: \"kubernetes.io/host-path/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-system-cni-dir\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.499807 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cnibin\" (UniqueName: \"kubernetes.io/host-path/02732a7d-257e-4d74-8a08-cea35074fc4e-cnibin\") pod \"multus-additional-cni-plugins-qvrsq\" (UID: \"02732a7d-257e-4d74-8a08-cea35074fc4e\") " pod="openshift-multus/multus-additional-cni-plugins-qvrsq" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.499846 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-os-release\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.499866 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-host-run-k8s-cni-cncf-io\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.499861 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"system-cni-dir\" (UniqueName: 
\"kubernetes.io/host-path/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-system-cni-dir\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.499925 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"os-release\" (UniqueName: \"kubernetes.io/host-path/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-os-release\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.499926 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-k8s-cni-cncf-io\" (UniqueName: \"kubernetes.io/host-path/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-host-run-k8s-cni-cncf-io\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.500198 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tuning-conf-dir\" (UniqueName: \"kubernetes.io/host-path/02732a7d-257e-4d74-8a08-cea35074fc4e-tuning-conf-dir\") pod \"multus-additional-cni-plugins-qvrsq\" (UID: \"02732a7d-257e-4d74-8a08-cea35074fc4e\") " pod="openshift-multus/multus-additional-cni-plugins-qvrsq" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.500248 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [webhook approver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":false,\\\"restartCount\\\":6,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.500374 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"hostroot\" (UniqueName: \"kubernetes.io/host-path/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-hostroot\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.500432 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-sysctl-allowlist\" (UniqueName: \"kubernetes.io/configmap/02732a7d-257e-4d74-8a08-cea35074fc4e-cni-sysctl-allowlist\") pod \"multus-additional-cni-plugins-qvrsq\" (UID: \"02732a7d-257e-4d74-8a08-cea35074fc4e\") " pod="openshift-multus/multus-additional-cni-plugins-qvrsq" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.500436 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/02732a7d-257e-4d74-8a08-cea35074fc4e-cni-binary-copy\") pod \"multus-additional-cni-plugins-qvrsq\" (UID: \"02732a7d-257e-4d74-8a08-cea35074fc4e\") " pod="openshift-multus/multus-additional-cni-plugins-qvrsq" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.500864 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cni-binary-copy\" (UniqueName: \"kubernetes.io/configmap/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-cni-binary-copy\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 
00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.500957 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"multus-daemon-config\" (UniqueName: \"kubernetes.io/configmap/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-multus-daemon-config\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.509046 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hjshz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e5a9914-f410-4b51-8e16-77e18f7b09fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [dns-node-resolver]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mm8dm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hjshz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.517961 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6x47n\" (UniqueName: \"kubernetes.io/projected/02732a7d-257e-4d74-8a08-cea35074fc4e-kube-api-access-6x47n\") pod \"multus-additional-cni-plugins-qvrsq\" (UID: \"02732a7d-257e-4d74-8a08-cea35074fc4e\") " pod="openshift-multus/multus-additional-cni-plugins-qvrsq" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.518631 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-22mhs\" (UniqueName: 
\"kubernetes.io/projected/9fd65949-6a8a-4828-baa4-1c88c71d5ed2-kube-api-access-22mhs\") pod \"multus-8wvqp\" (UID: \"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\") " pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.523074 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8wvqp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22mhs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabl
ed\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8wvqp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.535980 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-operator]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":false,\\\"restartCount\\\":5,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.549906 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": dial tcp 127.0.0.1:9743: connect: connection refused" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.562395 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [machine-config-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9mztb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:28Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.582111 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02732a7d-257e-4d74-8a08-cea35074fc4e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"
/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qvrsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:28Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.613886 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" 
err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c8a175-7cc7-4e45-8e37-d431994e5526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":
\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T00:07:26Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1126 00:07:11.241353 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 00:07:11.243535 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-775517230/tls.crt::/tmp/serving-cert-775517230/tls.key\\\\\\\"\\\\nI1126 00:07:26.715442 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 00:07:26.718387 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 00:07:26.718433 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 00:07:26.718455 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 00:07:26.718462 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 00:07:26.727473 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 00:07:26.727505 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727510 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727513 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 00:07:26.727516 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 00:07:26.727520 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 00:07:26.727524 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 00:07:26.727732 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 00:07:26.730962 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:28Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.657426 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:28Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.658197 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.660251 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"f4b27818a5e8e43d0dc095d08835c792","Type":"ContainerStarted","Data":"cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5"} Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.660589 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.661289 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"e937f6eec994cc30ea222240bdcc8d7facc4ae7880f79608903ec24e8644fb70"} Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.662804 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"e0dde34d63f530305d33e43053c6475f1e1a5a794722700a85758a4d3d3ef5ae"} Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.662851 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" event={"ID":"37a5e44f-9a88-4405-be8a-b645485e7312","Type":"ContainerStarted","Data":"f3cb44b8de1c44aaca6d3e215c1c820be76ea821eb8c3ebf94d2ad5ec9935f6d"} Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.664452 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" event={"ID":"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e","Type":"ContainerStarted","Data":"546875ee0f2f4855eb47134061708606927a379fcc7e1c3492cffcc519bbb542"} Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 
00:07:28.664512 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" event={"ID":"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e","Type":"ContainerStarted","Data":"2cdf274332d9e071cabf1993c37531ebc66300c965dd736a4795252168efc371"} Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.664527 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" event={"ID":"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e","Type":"ContainerStarted","Data":"7b9bd35b782db862c0d10d45af6db91f7271a261a48ece6f2c6ff4ebad25cb65"} Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.666173 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-hjshz" event={"ID":"5e5a9914-f410-4b51-8e16-77e18f7b09fb","Type":"ContainerStarted","Data":"6a43439ddb6f096b37261ab045b6a3ed923eefecb9c3122b00a87517605d0aaf"} Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.666341 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/node-resolver-hjshz" event={"ID":"5e5a9914-f410-4b51-8e16-77e18f7b09fb","Type":"ContainerStarted","Data":"702c5cfd1aaf1f53518898c681d2e7b3f86447def812bc0fdeaee0e579c886b1"} Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.668205 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"a19f36bfb94e5e0000f6e930c5160e799d4a3e334d2b119dcc6879fac1a4a98e"} Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.668245 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"b9dfa9b8751b0d639d299539f76b3a57c46b730d3960851344162019cb4843f0"} Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.668257 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" event={"ID":"ef543e1b-8068-4ea3-b32a-61027b32e95d","Type":"ContainerStarted","Data":"baec9b292e890afaf857f58c2d779caa0211f59a7a1b3af2d5456646741dc642"} Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.677723 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.681015 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-8wvqp" Nov 26 00:07:28 crc kubenswrapper[4875]: W1126 00:07:28.691624 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod02732a7d_257e_4d74_8a08_cea35074fc4e.slice/crio-afc9ab24035205f1ca8e694323067aa8a891354bd3811dd55fae51acae91346d WatchSource:0}: Error finding container afc9ab24035205f1ca8e694323067aa8a891354bd3811dd55fae51acae91346d: Status 404 returned error can't find the container with id afc9ab24035205f1ca8e694323067aa8a891354bd3811dd55fae51acae91346d Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.693240 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:28Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.704678 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-hh8lx"] Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.705628 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: W1126 00:07:28.716623 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9fd65949_6a8a_4828_baa4_1c88c71d5ed2.slice/crio-e922516d7a8f618c688227f0b8313993d699935797e3bca268bb68312bc83a97 WatchSource:0}: Error finding container e922516d7a8f618c688227f0b8313993d699935797e3bca268bb68312bc83a97: Status 404 returned error can't find the container with id e922516d7a8f618c688227f0b8313993d699935797e3bca268bb68312bc83a97 Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.747921 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.748399 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6e20b72-5757-4560-ab3e-61ad3a451be8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b34767b95b984666c42ed51106c7d43766e36ce3f5f4e743d8a40451536f82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dc301396f467d1c7d0af549fa7bf7daf1542a2aa87bf3ece820ce8e21a933d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\
\\":\\\"cri-o://9641d3d437fd7125bbf21d140d2e6853aed997e53f3f6be8133ce94c0d4a4653\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5086422d5107d7bba0da4b6f9b3e75966b3ee5833d93e8f5baa87e59f99e4578\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:28Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.780824 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.784845 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.802174 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-host-run-ovn-kubernetes\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.802206 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mr9lb\" (UniqueName: \"kubernetes.io/projected/8fe08e0b-f339-4beb-953e-6e0180c6c945-kube-api-access-mr9lb\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.802231 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/8fe08e0b-f339-4beb-953e-6e0180c6c945-ovn-node-metrics-cert\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.802282 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-host-run-netns\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.802298 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-var-lib-openvswitch\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.802313 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-run-openvswitch\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.802327 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-etc-openvswitch\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.802342 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/8fe08e0b-f339-4beb-953e-6e0180c6c945-ovnkube-script-lib\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.802357 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-systemd-units\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.802371 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-run-systemd\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.802386 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: 
\"kubernetes.io/configmap/8fe08e0b-f339-4beb-953e-6e0180c6c945-ovnkube-config\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.802429 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.802447 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-host-cni-netd\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.802472 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-host-kubelet\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.802487 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-log-socket\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.802500 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-host-slash\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.802514 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-run-ovn\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.802527 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-node-log\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.802543 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/8fe08e0b-f339-4beb-953e-6e0180c6c945-env-overrides\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.802558 4875 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-host-cni-bin\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.805806 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.830742 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.845725 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.870919 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.903682 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-host-cni-netd\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.903716 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-host-kubelet\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.903731 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-log-socket\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.903750 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-host-slash\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.903766 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-run-ovn\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.903782 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-node-log\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.903800 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/8fe08e0b-f339-4beb-953e-6e0180c6c945-env-overrides\") pod 
\"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.903816 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-host-cni-bin\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.903814 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-host-cni-netd\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.903834 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-host-run-ovn-kubernetes\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.903862 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-log-socket\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.903893 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mr9lb\" (UniqueName: \"kubernetes.io/projected/8fe08e0b-f339-4beb-953e-6e0180c6c945-kube-api-access-mr9lb\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.903912 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-host-kubelet\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.903991 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/8fe08e0b-f339-4beb-953e-6e0180c6c945-ovn-node-metrics-cert\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.904004 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-host-slash\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.903873 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-host-run-ovn-kubernetes\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " 
pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.904053 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-host-cni-bin\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.904057 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-run-ovn\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.904033 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-node-log\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.904100 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-host-run-netns\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.904122 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-var-lib-openvswitch\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.904143 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-run-openvswitch\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.904162 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-etc-openvswitch\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.904166 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-host-run-netns\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.904178 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/8fe08e0b-f339-4beb-953e-6e0180c6c945-ovnkube-script-lib\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.904229 4875 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-run-openvswitch\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.904257 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-systemd-units\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.904285 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-run-systemd\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.904292 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-var-lib-openvswitch\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.904311 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/8fe08e0b-f339-4beb-953e-6e0180c6c945-ovnkube-config\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.904325 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-systemd-units\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.904356 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-run-systemd\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.904353 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.904373 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/8fe08e0b-f339-4beb-953e-6e0180c6c945-env-overrides\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.904500 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" 
(UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-etc-openvswitch\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.904550 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.904803 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/8fe08e0b-f339-4beb-953e-6e0180c6c945-ovnkube-script-lib\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.904976 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/8fe08e0b-f339-4beb-953e-6e0180c6c945-ovnkube-config\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.908755 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/8fe08e0b-f339-4beb-953e-6e0180c6c945-ovn-node-metrics-cert\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.918253 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:28Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.941243 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mr9lb\" (UniqueName: \"kubernetes.io/projected/8fe08e0b-f339-4beb-953e-6e0180c6c945-kube-api-access-mr9lb\") pod \"ovnkube-node-hh8lx\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:28 crc kubenswrapper[4875]: I1126 00:07:28.975585 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:28Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.005153 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:07:29 crc kubenswrapper[4875]: E1126 00:07:29.005388 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:07:31.005339699 +0000 UTC m=+24.195315534 (durationBeforeRetry 2s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.014521 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a19f36bfb94e5e0000f6e930c5160e799d4a3e334d2b119dcc6879fac1a4a98e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9dfa9b8751b0d639d299539f76b3a57c46b730d3960851344162019cb4843f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:29Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.021234 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:29 crc kubenswrapper[4875]: W1126 00:07:29.034971 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8fe08e0b_f339_4beb_953e_6e0180c6c945.slice/crio-247927d0cd1075c4092cddef6dfe1a6d11cb3f6bbaacdbdd4a9afde59acfc187 WatchSource:0}: Error finding container 247927d0cd1075c4092cddef6dfe1a6d11cb3f6bbaacdbdd4a9afde59acfc187: Status 404 returned error can't find the container with id 247927d0cd1075c4092cddef6dfe1a6d11cb3f6bbaacdbdd4a9afde59acfc187 Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.044645 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-etcd/etcd-crc" Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.056275 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-etcd/etcd-crc" Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.058850 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hjshz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e5a9914-f410-4b51-8e16-77e18f7b09fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a43439ddb6f096b37261ab045b6a3ed923eefecb9c3122b00a87517605d0aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mm8dm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod 
\"openshift-dns\"/\"node-resolver-hjshz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:29Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.075565 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd/etcd-crc"] Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.107013 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.107063 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.107092 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.107119 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:07:29 crc kubenswrapper[4875]: E1126 00:07:29.107190 4875 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 00:07:29 crc kubenswrapper[4875]: E1126 00:07:29.107270 4875 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 00:07:29 crc kubenswrapper[4875]: E1126 00:07:29.107293 4875 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 00:07:29 crc kubenswrapper[4875]: E1126 00:07:29.107305 4875 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 00:07:29 crc kubenswrapper[4875]: E1126 00:07:29.107275 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf 
podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 00:07:31.107250001 +0000 UTC m=+24.297225696 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 00:07:29 crc kubenswrapper[4875]: E1126 00:07:29.107493 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 00:07:31.107404655 +0000 UTC m=+24.297380350 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 00:07:29 crc kubenswrapper[4875]: E1126 00:07:29.107520 4875 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 00:07:29 crc kubenswrapper[4875]: E1126 00:07:29.107566 4875 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 00:07:29 crc kubenswrapper[4875]: E1126 00:07:29.107581 4875 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 00:07:29 crc kubenswrapper[4875]: E1126 00:07:29.107668 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 00:07:31.107645321 +0000 UTC m=+24.297621006 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 00:07:29 crc kubenswrapper[4875]: E1126 00:07:29.108543 4875 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 00:07:29 crc kubenswrapper[4875]: E1126 00:07:29.108696 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. 
No retries permitted until 2025-11-26 00:07:31.108668216 +0000 UTC m=+24.298643931 (durationBeforeRetry 2s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.114270 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8wvqp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-r
un-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22mhs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8wvqp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:29Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.154945 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0dde34d63f530305d33e43053c6475f1e1a5a794722700a85758a4d3d3ef5ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:29Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.192984 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://546875ee0f2f4855eb47134061708606927a379fcc7e1c3492cffcc519bbb542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdf274332d9e071cabf1993c37531ebc66300c965dd736a4795252168efc371\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9mztb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:29Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.233645 4875 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02732a7d-257e-4d74-8a08-cea35074fc4e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting
\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qvrsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:29Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.272979 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:29Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.319911 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe08e0b-f339-4beb-953e-6e0180c6c945\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hh8lx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:29Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.353523 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c8a175-7cc7-4e45-8e37-d431994e5526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T00:07:26Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1126 00:07:11.241353 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 00:07:11.243535 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-775517230/tls.crt::/tmp/serving-cert-775517230/tls.key\\\\\\\"\\\\nI1126 00:07:26.715442 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 00:07:26.718387 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 00:07:26.718433 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 00:07:26.718455 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 00:07:26.718462 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 00:07:26.727473 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 00:07:26.727505 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727510 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727513 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 00:07:26.727516 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 00:07:26.727520 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 00:07:26.727524 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 00:07:26.727732 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 00:07:26.730962 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:29Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.392671 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:29Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.431628 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:29Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.475849 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6e20b72-5757-4560-ab3e-61ad3a451be8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b34767b95b984666c42ed51106c7d43766e36ce3f5f4e743d8a40451536f82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dc301396f467d1c7d0af549fa7bf7daf1542a2aa87bf3ece820ce8e21a933d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"
2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9641d3d437fd7125bbf21d140d2e6853aed997e53f3f6be8133ce94c0d4a4653\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5086422d5107d7bba0da4b6f9b3e75966b3ee5833d93e8f5baa87e59f99e4578\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:29Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.518113 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8wvqp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22mhs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8wvqp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": 
failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:29Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.528068 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.528271 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:07:29 crc kubenswrapper[4875]: E1126 00:07:29.528504 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:07:29 crc kubenswrapper[4875]: E1126 00:07:29.528716 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.528750 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:07:29 crc kubenswrapper[4875]: E1126 00:07:29.528949 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.557392 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c771ff97-2ac6-43b4-976e-06fb4909c83b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a0c30f49af86d8affd5039728fe2adf22324b93d617dd5c63dfc73aed0ed70e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f839a108e04ab5f68757b2f5661fe0643d88bbd33493cc800d9c6826d0e69c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef02df42efda33d3f123cfe925bb0d1adab00fecbfd8711962c4bdf458898e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"s
tartedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7650f30df5f9f908d3dc068e88e04892dce7a88f63ecc6a52901a36ba7bed5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aac38d8e30fa7e53d316dd4092f7210c7976bd44f77d2a21ab0ed53e1f6c69f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2
e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:29Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.593492 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0dde34d63f530305d33e43053c6475f1e1a5a794722700a85758a4d3d3ef5ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:29Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.635037 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:29Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.671134 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a19f36bfb94e5e0000f6e930c5160e799d4a3e334d2b119dcc6879fac1a4a98e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9dfa9b8751b0d639d299539f76b3a57c46b730d3960851344162019cb4843f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMount
s\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:29Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.671427 4875 generic.go:334] "Generic (PLEG): container finished" podID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerID="d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21" exitCode=0 Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.671466 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" event={"ID":"8fe08e0b-f339-4beb-953e-6e0180c6c945","Type":"ContainerDied","Data":"d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21"} Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.671489 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" event={"ID":"8fe08e0b-f339-4beb-953e-6e0180c6c945","Type":"ContainerStarted","Data":"247927d0cd1075c4092cddef6dfe1a6d11cb3f6bbaacdbdd4a9afde59acfc187"} Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.672870 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-8wvqp" event={"ID":"9fd65949-6a8a-4828-baa4-1c88c71d5ed2","Type":"ContainerStarted","Data":"e107494e5b30ef73730bc20a4b4ae86c83caf1590f6a8044d1892e424e1559a8"} Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.672892 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-8wvqp" event={"ID":"9fd65949-6a8a-4828-baa4-1c88c71d5ed2","Type":"ContainerStarted","Data":"e922516d7a8f618c688227f0b8313993d699935797e3bca268bb68312bc83a97"} Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.675086 4875 generic.go:334] "Generic (PLEG): container finished" podID="02732a7d-257e-4d74-8a08-cea35074fc4e" containerID="4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72" exitCode=0 Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.675290 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" event={"ID":"02732a7d-257e-4d74-8a08-cea35074fc4e","Type":"ContainerDied","Data":"4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72"} Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.675320 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" event={"ID":"02732a7d-257e-4d74-8a08-cea35074fc4e","Type":"ContainerStarted","Data":"afc9ab24035205f1ca8e694323067aa8a891354bd3811dd55fae51acae91346d"} Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.710340 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hjshz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e5a9914-f410-4b51-8e16-77e18f7b09fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a43439ddb6f096b37261ab045b6a3ed923eefecb9c3122b00a87517605d0aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mm8dm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hjshz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:29Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.753364 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://546875ee0f2f4855eb47134061708606927a379fcc7e1c3492cffcc519bbb542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdf274332d9e071cabf1993c37531ebc66300c965dd736a4795252168efc371\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9mztb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:29Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.795062 4875 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02732a7d-257e-4d74-8a08-cea35074fc4e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with incomplete status: [egress-router-binary-copy cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\
\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting
\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qvrsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:29Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.839331 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c8a175-7cc7-4e45-8e37-d431994e5526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T00:07:26Z\\\",\\\"message\\\":\\\"espace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1126 00:07:11.241353 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 00:07:11.243535 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-775517230/tls.crt::/tmp/serving-cert-775517230/tls.key\\\\\\\"\\\\nI1126 00:07:26.715442 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 00:07:26.718387 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 00:07:26.718433 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 00:07:26.718455 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 00:07:26.718462 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 00:07:26.727473 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 00:07:26.727505 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727510 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727513 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 00:07:26.727516 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 00:07:26.727520 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 00:07:26.727524 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 00:07:26.727732 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 00:07:26.730962 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:29Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.870274 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:29Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.882926 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/node-ca-tvsgw"] Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.884342 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/node-ca-tvsgw" Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.902761 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.922194 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.943355 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.963274 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 26 00:07:29 crc kubenswrapper[4875]: I1126 00:07:29.997286 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe08e0b-f339-4beb-953e-6e0180c6c945\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with incomplete status: [kubecfg-setup]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hh8lx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:29Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:30 crc kubenswrapper[4875]: I1126 00:07:30.021608 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/be016325-bd90-4293-9d92-bc8cfba8a463-serviceca\") pod \"node-ca-tvsgw\" (UID: \"be016325-bd90-4293-9d92-bc8cfba8a463\") " pod="openshift-image-registry/node-ca-tvsgw" Nov 26 00:07:30 
crc kubenswrapper[4875]: I1126 00:07:30.021713 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-97pr5\" (UniqueName: \"kubernetes.io/projected/be016325-bd90-4293-9d92-bc8cfba8a463-kube-api-access-97pr5\") pod \"node-ca-tvsgw\" (UID: \"be016325-bd90-4293-9d92-bc8cfba8a463\") " pod="openshift-image-registry/node-ca-tvsgw" Nov 26 00:07:30 crc kubenswrapper[4875]: I1126 00:07:30.021749 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/be016325-bd90-4293-9d92-bc8cfba8a463-host\") pod \"node-ca-tvsgw\" (UID: \"be016325-bd90-4293-9d92-bc8cfba8a463\") " pod="openshift-image-registry/node-ca-tvsgw" Nov 26 00:07:30 crc kubenswrapper[4875]: I1126 00:07:30.031679 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6e20b72-5757-4560-ab3e-61ad3a451be8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b34767b95b984666c42ed51106c7d43766e36ce3f5f4e743d8a40451536f82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dc301396f467d1c7d0af549fa7bf7daf1542a2aa87bf3ece820ce8e21a933d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9641d3d437fd7125bbf21d140d2e6
853aed997e53f3f6be8133ce94c0d4a4653\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5086422d5107d7bba0da4b6f9b3e75966b3ee5833d93e8f5baa87e59f99e4578\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:30Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:30 crc kubenswrapper[4875]: I1126 00:07:30.072559 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:30Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:30 crc kubenswrapper[4875]: I1126 00:07:30.123320 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-97pr5\" (UniqueName: \"kubernetes.io/projected/be016325-bd90-4293-9d92-bc8cfba8a463-kube-api-access-97pr5\") pod \"node-ca-tvsgw\" (UID: \"be016325-bd90-4293-9d92-bc8cfba8a463\") " pod="openshift-image-registry/node-ca-tvsgw" Nov 26 00:07:30 crc kubenswrapper[4875]: I1126 00:07:30.123367 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host\" (UniqueName: \"kubernetes.io/host-path/be016325-bd90-4293-9d92-bc8cfba8a463-host\") pod \"node-ca-tvsgw\" (UID: \"be016325-bd90-4293-9d92-bc8cfba8a463\") " pod="openshift-image-registry/node-ca-tvsgw" Nov 26 00:07:30 crc kubenswrapper[4875]: I1126 00:07:30.123389 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/be016325-bd90-4293-9d92-bc8cfba8a463-serviceca\") pod \"node-ca-tvsgw\" (UID: \"be016325-bd90-4293-9d92-bc8cfba8a463\") " pod="openshift-image-registry/node-ca-tvsgw" Nov 26 00:07:30 crc kubenswrapper[4875]: I1126 00:07:30.123818 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host\" (UniqueName: \"kubernetes.io/host-path/be016325-bd90-4293-9d92-bc8cfba8a463-host\") pod \"node-ca-tvsgw\" (UID: \"be016325-bd90-4293-9d92-bc8cfba8a463\") " pod="openshift-image-registry/node-ca-tvsgw" Nov 26 00:07:30 crc kubenswrapper[4875]: I1126 00:07:30.124265 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/be016325-bd90-4293-9d92-bc8cfba8a463-serviceca\") pod \"node-ca-tvsgw\" (UID: \"be016325-bd90-4293-9d92-bc8cfba8a463\") " pod="openshift-image-registry/node-ca-tvsgw" Nov 26 00:07:30 crc kubenswrapper[4875]: I1126 00:07:30.141765 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:30Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:30 crc kubenswrapper[4875]: I1126 00:07:30.163231 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-97pr5\" (UniqueName: \"kubernetes.io/projected/be016325-bd90-4293-9d92-bc8cfba8a463-kube-api-access-97pr5\") pod \"node-ca-tvsgw\" (UID: \"be016325-bd90-4293-9d92-bc8cfba8a463\") " pod="openshift-image-registry/node-ca-tvsgw" Nov 26 00:07:30 crc kubenswrapper[4875]: I1126 00:07:30.173714 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tvsgw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"be016325-bd90-4293-9d92-bc8cfba8a463\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97pr5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tvsgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:30Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:30 crc kubenswrapper[4875]: I1126 00:07:30.225063 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c8a175-7cc7-4e45-8e37-d431994e5526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T00:07:26Z\\\",\\\"message\\\":\\\"espace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1126 00:07:11.241353 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 00:07:11.243535 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-775517230/tls.crt::/tmp/serving-cert-775517230/tls.key\\\\\\\"\\\\nI1126 00:07:26.715442 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 00:07:26.718387 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 00:07:26.718433 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 00:07:26.718455 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 00:07:26.718462 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 00:07:26.727473 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 00:07:26.727505 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727510 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727513 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 00:07:26.727516 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 00:07:26.727520 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 00:07:26.727524 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 00:07:26.727732 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 00:07:26.730962 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:30Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:30 crc kubenswrapper[4875]: I1126 00:07:30.261513 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:30Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:30 crc kubenswrapper[4875]: I1126 00:07:30.297235 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe08e0b-f339-4beb-953e-6e0180c6c945\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hh8lx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:30Z 
is after 2025-08-24T17:21:41Z" Nov 26 00:07:30 crc kubenswrapper[4875]: I1126 00:07:30.332525 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6e20b72-5757-4560-ab3e-61ad3a451be8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b34767b95b984666c42ed51106c7d43766e36ce3f5f4e743d8a40451536f82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dc301396f467d1c7d0af549fa7bf7daf1542a2aa87bf3ece820ce8e21a933d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9641d3d437fd7125bbf21d140d2e6853aed997e53f3f6be8133ce94c0d4a4653\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\
\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5086422d5107d7bba0da4b6f9b3e75966b3ee5833d93e8f5baa87e59f99e4578\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:30Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:30 crc kubenswrapper[4875]: I1126 00:07:30.343098 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/node-ca-tvsgw" Nov 26 00:07:30 crc kubenswrapper[4875]: W1126 00:07:30.357239 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podbe016325_bd90_4293_9d92_bc8cfba8a463.slice/crio-a119879678f1272f148e875d21140524596b2b3f37eef2a1a427d0add1c8a606 WatchSource:0}: Error finding container a119879678f1272f148e875d21140524596b2b3f37eef2a1a427d0add1c8a606: Status 404 returned error can't find the container with id a119879678f1272f148e875d21140524596b2b3f37eef2a1a427d0add1c8a606 Nov 26 00:07:30 crc kubenswrapper[4875]: I1126 00:07:30.373203 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:30Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:30 crc kubenswrapper[4875]: I1126 00:07:30.411105 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:30Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:30 crc kubenswrapper[4875]: I1126 00:07:30.449356 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hjshz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e5a9914-f410-4b51-8e16-77e18f7b09fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a43439ddb6f096b37261ab045b6a3ed923eefecb9c3122b00a87517605d0aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mm8dm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hjshz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:30Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:30 crc kubenswrapper[4875]: I1126 00:07:30.492474 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8wvqp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e107494e5b30ef73730bc20a4b4ae86c83caf1590f6a8044d1892e424e1559a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22mhs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8wvqp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:30Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:30 crc kubenswrapper[4875]: I1126 00:07:30.537251 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c771ff97-2ac6-43b4-976e-06fb4909c83b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a0c30f49af86d8affd5039728fe2adf22324b93d617dd5c63dfc73aed0ed70e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f839a108e04ab5f68757b2f5661fe0643d88bbd33493cc800d9c6826d0e69c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cr
i-o://7ef02df42efda33d3f123cfe925bb0d1adab00fecbfd8711962c4bdf458898e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7650f30df5f9f908d3dc068e88e04892dce7a88f63ecc6a52901a36ba7bed5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aac38d8e30fa7e53d316dd4092f7210c7976bd44f77d2a21ab0ed53e1f6c69f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3
677930f083ac6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:30Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:30 crc kubenswrapper[4875]: I1126 00:07:30.573230 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0dde34d63f530305d33e43053c6475f1e1a5a794722700a85758a4d3d3ef5ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:30Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:30 crc kubenswrapper[4875]: I1126 00:07:30.610555 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [iptables-alerter]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:30Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:30 crc kubenswrapper[4875]: I1126 00:07:30.652732 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a19f36bfb94e5e0000f6e930c5160e799d4a3e334d2b119dcc6879fac1a4a98e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9dfa9b8751b0d639d299539f76b3a57c46b730d3960851344162019cb4843f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMount
s\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:30Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:30 crc kubenswrapper[4875]: I1126 00:07:30.679726 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" event={"ID":"02732a7d-257e-4d74-8a08-cea35074fc4e","Type":"ContainerStarted","Data":"026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317"} Nov 26 00:07:30 crc kubenswrapper[4875]: I1126 00:07:30.681366 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" event={"ID":"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49","Type":"ContainerStarted","Data":"5231f747090cf07f92f8f56d3c43191b581098758208e6901b28de04113ab0c5"} Nov 26 00:07:30 crc kubenswrapper[4875]: I1126 00:07:30.684850 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" event={"ID":"8fe08e0b-f339-4beb-953e-6e0180c6c945","Type":"ContainerStarted","Data":"9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e"} Nov 26 00:07:30 crc kubenswrapper[4875]: I1126 00:07:30.684886 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" event={"ID":"8fe08e0b-f339-4beb-953e-6e0180c6c945","Type":"ContainerStarted","Data":"7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886"} Nov 26 00:07:30 crc kubenswrapper[4875]: I1126 00:07:30.684894 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" event={"ID":"8fe08e0b-f339-4beb-953e-6e0180c6c945","Type":"ContainerStarted","Data":"4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826"} Nov 26 00:07:30 crc kubenswrapper[4875]: I1126 00:07:30.686022 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-tvsgw" event={"ID":"be016325-bd90-4293-9d92-bc8cfba8a463","Type":"ContainerStarted","Data":"a119879678f1272f148e875d21140524596b2b3f37eef2a1a427d0add1c8a606"} Nov 26 00:07:30 crc kubenswrapper[4875]: I1126 00:07:30.692790 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://546875ee0f2f4855eb47134061708606927a379fcc7e1c3492cffcc519bbb542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdf274332d9e071cabf1993c37531ebc66300c965dd736a4795252168efc371\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9mztb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:30Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:30 crc kubenswrapper[4875]: I1126 00:07:30.733952 4875 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02732a7d-257e-4d74-8a08-cea35074fc4e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-ap
i-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qvrsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:30Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:30 crc kubenswrapper[4875]: I1126 00:07:30.780456 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c771ff97-2ac6-43b4-976e-06fb4909c83b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a0c30f49af86d8affd5039728fe2adf22324b93d617dd5c63dfc73aed0ed70e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f839a108e04ab5f68757b2f5661fe0643d88bbd33493cc800d9c6826d0e69c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6
a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef02df42efda33d3f123cfe925bb0d1adab00fecbfd8711962c4bdf458898e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7650f30df5f9f908d3dc068e88e04892dce7a88f63ecc6a52901a36ba7bed5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aac38d8e30fa7e53d316dd4092f7210c7976bd44f77d2a21ab0ed53e1f6c69f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageI
D\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:30Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:30 crc kubenswrapper[4875]: I1126 00:07:30.813430 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0dde34d63f530305d33e43053c6475f1e1a5a794722700a85758a4d3d3ef5ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:30Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:30 crc kubenswrapper[4875]: I1126 00:07:30.850393 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5231f747090cf07f92f8f56d3c43191b581098758208e6901b28de04113ab0c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:30Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:30 crc kubenswrapper[4875]: I1126 00:07:30.892196 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a19f36bfb94e5e0000f6e930c5160e799d4a3e334d2b119dcc6879fac1a4a98e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9dfa9b8751b0d639d299539f76b3a57c46b730d3960851344162019cb4843f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:30Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:30 crc kubenswrapper[4875]: I1126 00:07:30.932075 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hjshz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e5a9914-f410-4b51-8e16-77e18f7b09fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a43439ddb6f096b37261ab045b6a3ed923eefecb9c3122b00a87517605d0aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mm8dm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hjshz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:30Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:30 crc kubenswrapper[4875]: I1126 00:07:30.973214 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8wvqp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e107494e5b30ef73730bc20a4b4ae86c83caf1590f6a8044d1892e424e1559a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22mhs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8wvqp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:30Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:31 crc kubenswrapper[4875]: I1126 00:07:31.011426 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://546875ee0f2f4855eb47134061708606927a379fcc7e1c3492cffcc519bbb542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdf274332d9e071cabf1993c37531ebc66300c965dd736a4795252168efc371\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-9mztb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:31Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:31 crc kubenswrapper[4875]: I1126 00:07:31.033518 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:07:31 crc kubenswrapper[4875]: E1126 00:07:31.033665 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:07:35.033640346 +0000 UTC m=+28.223616041 (durationBeforeRetry 4s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:07:31 crc kubenswrapper[4875]: I1126 00:07:31.055126 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02732a7d-257e-4d74-8a08-cea35074fc4e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with incomplete status: [cni-plugins bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64
b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qvrsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call 
webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:31Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:31 crc kubenswrapper[4875]: I1126 00:07:31.093267 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c8a175-7cc7-4e45-8e37-d431994e5526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha2
56:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T00:07:26Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1126 00:07:11.241353 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 00:07:11.243535 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-775517230/tls.crt::/tmp/serving-cert-775517230/tls.key\\\\\\\"\\\\nI1126 00:07:26.715442 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 00:07:26.718387 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 00:07:26.718433 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 00:07:26.718455 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 00:07:26.718462 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 00:07:26.727473 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 00:07:26.727505 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727510 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727513 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 00:07:26.727516 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 00:07:26.727520 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 00:07:26.727524 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 00:07:26.727732 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 00:07:26.730962 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:31Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:31 crc kubenswrapper[4875]: I1126 00:07:31.133565 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:31Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:31 crc kubenswrapper[4875]: I1126 00:07:31.134864 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:07:31 crc kubenswrapper[4875]: I1126 00:07:31.134914 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:07:31 crc kubenswrapper[4875]: I1126 00:07:31.134948 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:07:31 crc kubenswrapper[4875]: I1126 00:07:31.134969 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:07:31 crc kubenswrapper[4875]: E1126 00:07:31.135064 4875 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object 
"openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 00:07:31 crc kubenswrapper[4875]: E1126 00:07:31.135085 4875 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 00:07:31 crc kubenswrapper[4875]: E1126 00:07:31.135092 4875 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 00:07:31 crc kubenswrapper[4875]: E1126 00:07:31.135101 4875 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 00:07:31 crc kubenswrapper[4875]: E1126 00:07:31.135108 4875 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 00:07:31 crc kubenswrapper[4875]: E1126 00:07:31.135147 4875 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 00:07:31 crc kubenswrapper[4875]: E1126 00:07:31.135162 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 00:07:35.135143457 +0000 UTC m=+28.325119212 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 00:07:31 crc kubenswrapper[4875]: E1126 00:07:31.135197 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 00:07:35.135181238 +0000 UTC m=+28.325156993 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 00:07:31 crc kubenswrapper[4875]: E1126 00:07:31.135239 4875 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 00:07:31 crc kubenswrapper[4875]: E1126 00:07:31.135261 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 00:07:35.13525565 +0000 UTC m=+28.325231345 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 00:07:31 crc kubenswrapper[4875]: E1126 00:07:31.135112 4875 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 00:07:31 crc kubenswrapper[4875]: E1126 00:07:31.135295 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 00:07:35.135288321 +0000 UTC m=+28.325264116 (durationBeforeRetry 4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 00:07:31 crc kubenswrapper[4875]: I1126 00:07:31.175814 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe08e0b-f339-4beb-953e-6e0180c6c945\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hh8lx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:31Z 
is after 2025-08-24T17:21:41Z" Nov 26 00:07:31 crc kubenswrapper[4875]: I1126 00:07:31.208708 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tvsgw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be016325-bd90-4293-9d92-bc8cfba8a463\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97pr5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tvsgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:31Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:31 crc kubenswrapper[4875]: I1126 00:07:31.252491 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6e20b72-5757-4560-ab3e-61ad3a451be8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b34767b95b984666c42ed51106c7d43766e36ce3f5f4e743d8a40451536f82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dc301396f467d1c7d0af549fa7bf7daf1542a2aa87bf3ece820ce8e21a933d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9641d3d437fd7125bbf21d140d2e6853aed997e53f3f6be8133ce94c0d4a4653\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5086422d5107d7bba0da4b6f9b3e75966b3ee5833d93e8f5baa87e59f99e4578\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:31Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:31 crc kubenswrapper[4875]: I1126 00:07:31.290160 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:31Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:31 crc kubenswrapper[4875]: I1126 00:07:31.330525 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:31Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:31 crc kubenswrapper[4875]: I1126 00:07:31.528866 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:07:31 crc kubenswrapper[4875]: I1126 00:07:31.528901 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:07:31 crc kubenswrapper[4875]: I1126 00:07:31.528866 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:07:31 crc kubenswrapper[4875]: E1126 00:07:31.529045 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:07:31 crc kubenswrapper[4875]: E1126 00:07:31.528998 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:07:31 crc kubenswrapper[4875]: E1126 00:07:31.529517 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:07:31 crc kubenswrapper[4875]: I1126 00:07:31.692806 4875 generic.go:334] "Generic (PLEG): container finished" podID="02732a7d-257e-4d74-8a08-cea35074fc4e" containerID="026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317" exitCode=0 Nov 26 00:07:31 crc kubenswrapper[4875]: I1126 00:07:31.692874 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" event={"ID":"02732a7d-257e-4d74-8a08-cea35074fc4e","Type":"ContainerDied","Data":"026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317"} Nov 26 00:07:31 crc kubenswrapper[4875]: I1126 00:07:31.697380 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" event={"ID":"8fe08e0b-f339-4beb-953e-6e0180c6c945","Type":"ContainerStarted","Data":"2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a"} Nov 26 00:07:31 crc kubenswrapper[4875]: I1126 00:07:31.697444 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" event={"ID":"8fe08e0b-f339-4beb-953e-6e0180c6c945","Type":"ContainerStarted","Data":"7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84"} Nov 26 00:07:31 crc kubenswrapper[4875]: I1126 00:07:31.697457 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" event={"ID":"8fe08e0b-f339-4beb-953e-6e0180c6c945","Type":"ContainerStarted","Data":"a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2"} Nov 26 00:07:31 crc kubenswrapper[4875]: I1126 00:07:31.699280 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/node-ca-tvsgw" event={"ID":"be016325-bd90-4293-9d92-bc8cfba8a463","Type":"ContainerStarted","Data":"6fecdc3efe4f0230352d90992e44266359ca7129f3654b43186f6e81bbe72ce2"} Nov 26 00:07:31 crc kubenswrapper[4875]: I1126 00:07:31.707187 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://546875ee0f2f4855eb47134061708606927a379fcc7e1c3492cffcc519bbb542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdf274332d9e071cabf1993c37531ebc66300c965dd736a4795252168efc371\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9mztb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:31Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:31 crc kubenswrapper[4875]: I1126 00:07:31.722528 4875 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02732a7d-257e-4d74-8a08-cea35074fc4e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"c
ontainerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}
},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qvrsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:31Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:31 crc kubenswrapper[4875]: I1126 00:07:31.737585 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c8a175-7cc7-4e45-8e37-d431994e5526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T00:07:26Z\\\",\\\"message\\\":\\\"espace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1126 00:07:11.241353 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 00:07:11.243535 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-775517230/tls.crt::/tmp/serving-cert-775517230/tls.key\\\\\\\"\\\\nI1126 00:07:26.715442 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 00:07:26.718387 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 00:07:26.718433 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 00:07:26.718455 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 00:07:26.718462 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 00:07:26.727473 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 00:07:26.727505 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727510 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727513 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 00:07:26.727516 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 00:07:26.727520 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 00:07:26.727524 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 00:07:26.727732 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 00:07:26.730962 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:31Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:31 crc kubenswrapper[4875]: I1126 00:07:31.749741 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:31Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:31 crc kubenswrapper[4875]: I1126 00:07:31.769741 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe08e0b-f339-4beb-953e-6e0180c6c945\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hh8lx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:31Z 
is after 2025-08-24T17:21:41Z" Nov 26 00:07:31 crc kubenswrapper[4875]: I1126 00:07:31.780080 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tvsgw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be016325-bd90-4293-9d92-bc8cfba8a463\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"message\\\":\\\"containers with unready status: [node-ca]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97pr5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tvsgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:31Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:31 crc kubenswrapper[4875]: I1126 00:07:31.791261 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6e20b72-5757-4560-ab3e-61ad3a451be8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b34767b95b984666c42ed51106c7d43766e36ce3f5f4e743d8a40451536f82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dc301396f467d1c7d0af549fa7bf7daf1542a2aa87bf3ece820ce8e21a933d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9641d3d437fd7125bbf21d140d2e6853aed997e53f3f6be8133ce94c0d4a4653\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5086422d5107d7bba0da4b6f9b3e75966b3ee5833d93e8f5baa87e59f99e4578\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:31Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:31 crc kubenswrapper[4875]: I1126 00:07:31.801825 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:31Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:31 crc kubenswrapper[4875]: I1126 00:07:31.814704 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:31Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:31 crc kubenswrapper[4875]: I1126 00:07:31.834881 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c771ff97-2ac6-43b4-976e-06fb4909c83b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a0c30f49af86d8affd5039728fe2adf22324b93d617dd5c63dfc73aed0ed70e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f839a108e04ab5f68757b2f5661fe0643d88bbd33493cc800d9c6826d0e69c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"r
estartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef02df42efda33d3f123cfe925bb0d1adab00fecbfd8711962c4bdf458898e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7650f30df5f9f908d3dc068e88e04892dce7a88f63ecc6a52901a36ba7bed5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aac38d8e30fa7e53d316dd4092f7210c7976bd44f77d2a21ab0ed53e1f6c69f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\
\\":{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:31Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:31 crc kubenswrapper[4875]: I1126 00:07:31.848109 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0dde34d63f530305d33e43053c6475f1e1a5a794722700a85758a4d3d3ef5ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:31Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:31 crc kubenswrapper[4875]: I1126 00:07:31.858909 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5231f747090cf07f92f8f56d3c43191b581098758208e6901b28de04113ab0c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:31Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:31 crc kubenswrapper[4875]: I1126 00:07:31.871380 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a19f36bfb94e5e0000f6e930c5160e799d4a3e334d2b119dcc6879fac1a4a98e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9dfa9b8751b0d639d299539f76b3a57c46b730d3960851344162019cb4843f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:31Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:31 crc kubenswrapper[4875]: I1126 00:07:31.889013 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hjshz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e5a9914-f410-4b51-8e16-77e18f7b09fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a43439ddb6f096b37261ab045b6a3ed923eefecb9c3122b00a87517605d0aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mm8dm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hjshz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:31Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:31 crc kubenswrapper[4875]: I1126 00:07:31.932585 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8wvqp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e107494e5b30ef73730bc20a4b4ae86c83caf1590f6a8044d1892e424e1559a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22mhs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8wvqp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:31Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:31 crc kubenswrapper[4875]: I1126 00:07:31.970756 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6e20b72-5757-4560-ab3e-61ad3a451be8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b34767b95b984666c42ed51106c7d43766e36ce3f5f4e743d8a40451536f82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dc301396f467d1c7d0af549fa7bf7daf1542a2aa87bf3ece820ce8e21a933d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9641d3d437fd7125bbf21d140d2e6853aed997e53f3f6be8133ce94c0d4a4653\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"st
arted\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5086422d5107d7bba0da4b6f9b3e75966b3ee5833d93e8f5baa87e59f99e4578\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:31Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:32 crc kubenswrapper[4875]: I1126 00:07:32.011162 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:32Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:32 crc kubenswrapper[4875]: I1126 00:07:32.056151 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:32Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:32 crc kubenswrapper[4875]: I1126 00:07:32.089841 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hjshz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e5a9914-f410-4b51-8e16-77e18f7b09fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a43439ddb6f096b37261ab045b6a3ed923eefecb9c3122b00a87517605d0aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mm8dm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hjshz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:32Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:32 crc kubenswrapper[4875]: I1126 00:07:32.131762 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8wvqp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e107494e5b30ef73730bc20a4b4ae86c83caf1590f6a8044d1892e424e1559a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22mhs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\
\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8wvqp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:32Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:32 crc kubenswrapper[4875]: I1126 00:07:32.179313 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c771ff97-2ac6-43b4-976e-06fb4909c83b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a0c30f49af86d8affd5039728fe2adf22324b93d617dd5c63dfc73aed0ed70e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f839a108e04ab5f68757b2f5661fe0643d88bbd33493cc800d9c6826d0e69c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cr
i-o://7ef02df42efda33d3f123cfe925bb0d1adab00fecbfd8711962c4bdf458898e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7650f30df5f9f908d3dc068e88e04892dce7a88f63ecc6a52901a36ba7bed5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aac38d8e30fa7e53d316dd4092f7210c7976bd44f77d2a21ab0ed53e1f6c69f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3
677930f083ac6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:32Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:32 crc kubenswrapper[4875]: I1126 00:07:32.209664 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0dde34d63f530305d33e43053c6475f1e1a5a794722700a85758a4d3d3ef5ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:32Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:32 crc kubenswrapper[4875]: I1126 00:07:32.250371 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5231f747090cf07f92f8f56d3c43191b581098758208e6901b28de04113ab0c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:32Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:32 crc kubenswrapper[4875]: I1126 00:07:32.290652 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a19f36bfb94e5e0000f6e930c5160e799d4a3e334d2b119dcc6879fac1a4a98e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9dfa9b8751b0d639d299539f76b3a57c46b730d3960851344162019cb4843f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:32Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:32 crc kubenswrapper[4875]: I1126 00:07:32.329366 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://546875ee0f2f4855eb47134061708606927a379fcc7e1c3492cffcc519bbb542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdf274332d9e071cabf1993c37531ebc66300c965dd736a4795252168efc371\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9mztb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:32Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:32 crc kubenswrapper[4875]: I1126 00:07:32.374603 4875 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02732a7d-257e-4d74-8a08-cea35074fc4e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with incomplete status: [bond-cni-plugin routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"c
ontainerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}
},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qvrsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:32Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:32 crc kubenswrapper[4875]: I1126 00:07:32.409170 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tvsgw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"be016325-bd90-4293-9d92-bc8cfba8a463\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fecdc3efe4f0230352d90992e44266359ca7129f3654b43186f6e81bbe72ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97pr5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tvsgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:32Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:32 crc kubenswrapper[4875]: I1126 00:07:32.452755 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c8a175-7cc7-4e45-8e37-d431994e5526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T00:07:26Z\\\",\\\"message\\\":\\\"espace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1126 00:07:11.241353 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 00:07:11.243535 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-775517230/tls.crt::/tmp/serving-cert-775517230/tls.key\\\\\\\"\\\\nI1126 00:07:26.715442 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 00:07:26.718387 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 00:07:26.718433 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 00:07:26.718455 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 00:07:26.718462 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 00:07:26.727473 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 00:07:26.727505 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727510 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727513 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 00:07:26.727516 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 00:07:26.727520 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 00:07:26.727524 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 00:07:26.727732 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 00:07:26.730962 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:32Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:32 crc kubenswrapper[4875]: I1126 00:07:32.494650 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:32Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:32 crc kubenswrapper[4875]: I1126 00:07:32.544766 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe08e0b-f339-4beb-953e-6e0180c6c945\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hh8lx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:32Z 
is after 2025-08-24T17:21:41Z" Nov 26 00:07:32 crc kubenswrapper[4875]: I1126 00:07:32.706136 4875 generic.go:334] "Generic (PLEG): container finished" podID="02732a7d-257e-4d74-8a08-cea35074fc4e" containerID="3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556" exitCode=0 Nov 26 00:07:32 crc kubenswrapper[4875]: I1126 00:07:32.706763 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" event={"ID":"02732a7d-257e-4d74-8a08-cea35074fc4e","Type":"ContainerDied","Data":"3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556"} Nov 26 00:07:32 crc kubenswrapper[4875]: I1126 00:07:32.746872 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe08e0b-f339-4beb-953e-6e0180c6c945\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-a
pi-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"
},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hh8lx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:32Z 
is after 2025-08-24T17:21:41Z" Nov 26 00:07:32 crc kubenswrapper[4875]: I1126 00:07:32.758723 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tvsgw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be016325-bd90-4293-9d92-bc8cfba8a463\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fecdc3efe4f0230352d90992e44266359ca7129f3654b43186f6e81bbe72ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97pr5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tvsgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:32Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:32 crc kubenswrapper[4875]: I1126 00:07:32.777276 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c8a175-7cc7-4e45-8e37-d431994e5526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T00:07:26Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1126 00:07:11.241353 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 00:07:11.243535 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-775517230/tls.crt::/tmp/serving-cert-775517230/tls.key\\\\\\\"\\\\nI1126 00:07:26.715442 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 00:07:26.718387 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 00:07:26.718433 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 00:07:26.718455 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 00:07:26.718462 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 00:07:26.727473 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 00:07:26.727505 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727510 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727513 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 00:07:26.727516 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 00:07:26.727520 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 00:07:26.727524 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 00:07:26.727732 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 00:07:26.730962 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:32Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:32 crc kubenswrapper[4875]: I1126 00:07:32.791208 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:32Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:32 crc kubenswrapper[4875]: I1126 00:07:32.802250 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:32Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:32 crc kubenswrapper[4875]: I1126 00:07:32.814632 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6e20b72-5757-4560-ab3e-61ad3a451be8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b34767b95b984666c42ed51106c7d43766e36ce3f5f4e743d8a40451536f82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dc301396f467d1c7d0af549fa7bf7daf1542a2aa87bf3ece820ce8e21a933d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"
2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9641d3d437fd7125bbf21d140d2e6853aed997e53f3f6be8133ce94c0d4a4653\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5086422d5107d7bba0da4b6f9b3e75966b3ee5833d93e8f5baa87e59f99e4578\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:32Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:32 crc kubenswrapper[4875]: I1126 00:07:32.824929 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:32Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:32 crc kubenswrapper[4875]: I1126 00:07:32.851674 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a19f36bfb94e5e0000f6e930c5160e799d4a3e334d2b119dcc6879fac1a4a98e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9dfa9b8751b0d639d299539f76b3a57c46b730d3960851344162019cb4843f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:32Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:32 crc kubenswrapper[4875]: I1126 00:07:32.888593 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hjshz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e5a9914-f410-4b51-8e16-77e18f7b09fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a43439ddb6f096b37261ab045b6a3ed923eefecb9c3122b00a87517605d0aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mm8dm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hjshz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:32Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:32 crc kubenswrapper[4875]: I1126 00:07:32.930125 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8wvqp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e107494e5b30ef73730bc20a4b4ae86c83caf1590f6a8044d1892e424e1559a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22mhs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8wvqp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:32Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:32 crc kubenswrapper[4875]: I1126 00:07:32.977086 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c771ff97-2ac6-43b4-976e-06fb4909c83b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a0c30f49af86d8affd5039728fe2adf22324b93d617dd5c63dfc73aed0ed70e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f839a108e04ab5f68757b2f5661fe0643d88bbd33493cc800d9c6826d0e69c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef02df42efda33d3f123cfe925bb0d1adab00fecbfd8711962c4bdf458898e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"la
stState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7650f30df5f9f908d3dc068e88e04892dce7a88f63ecc6a52901a36ba7bed5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aac38d8e30fa7e53d316dd4092f7210c7976bd44f77d2a21ab0ed53e1f6c69f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:32Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.010739 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0dde34d63f530305d33e43053c6475f1e1a5a794722700a85758a4d3d3ef5ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:33Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.050783 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5231f747090cf07f92f8f56d3c43191b581098758208e6901b28de04113ab0c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:33Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.089775 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://546875ee0f2f4855eb47134061708606927a379fcc7e1c3492cffcc519bbb542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdf274332d9e071cabf1993c37531ebc66300c965dd736a4795252168efc371\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9mztb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:33Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.110536 4875 kubelet_node_status.go:401] "Setting 
node annotation to enable volume controller attach/detach" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.112372 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.112401 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.112425 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.112546 4875 kubelet_node_status.go:76] "Attempting to register node" node="crc" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.132306 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02732a7d-257e-4d74-8a08-cea35074fc4e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with incomplete status: [routeoverride-cni whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/
cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qvrsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:33Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.163825 4875 kubelet_node_status.go:115] "Node was previously registered" node="crc" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.164030 4875 kubelet_node_status.go:79] "Successfully registered node" node="crc" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.165005 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.165038 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.165049 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.165064 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.165074 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:33Z","lastTransitionTime":"2025-11-26T00:07:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:33 crc kubenswrapper[4875]: E1126 00:07:33.176109 4875 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e98cdd5c-4f13-4403-b492-aac02592afb8\\\",\\\"systemUUID\\\":\\\"9f6d4e71-289a-40d7-bad1-b955dbea75de\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:33Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.179537 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.179563 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.179571 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.179585 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.179598 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:33Z","lastTransitionTime":"2025-11-26T00:07:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:33 crc kubenswrapper[4875]: E1126 00:07:33.189142 4875 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e98cdd5c-4f13-4403-b492-aac02592afb8\\\",\\\"systemUUID\\\":\\\"9f6d4e71-289a-40d7-bad1-b955dbea75de\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:33Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.191964 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.191995 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.192007 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.192024 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.192038 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:33Z","lastTransitionTime":"2025-11-26T00:07:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:33 crc kubenswrapper[4875]: E1126 00:07:33.202222 4875 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e98cdd5c-4f13-4403-b492-aac02592afb8\\\",\\\"systemUUID\\\":\\\"9f6d4e71-289a-40d7-bad1-b955dbea75de\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:33Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.205095 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.205131 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.205141 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.205155 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.205164 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:33Z","lastTransitionTime":"2025-11-26T00:07:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:33 crc kubenswrapper[4875]: E1126 00:07:33.218048 4875 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e98cdd5c-4f13-4403-b492-aac02592afb8\\\",\\\"systemUUID\\\":\\\"9f6d4e71-289a-40d7-bad1-b955dbea75de\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:33Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.221154 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.221186 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.221199 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.221212 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.221222 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:33Z","lastTransitionTime":"2025-11-26T00:07:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:33 crc kubenswrapper[4875]: E1126 00:07:33.230637 4875 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:33Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:33Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:33Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:33Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e98cdd5c-4f13-4403-b492-aac02592afb8\\\",\\\"systemUUID\\\":\\\"9f6d4e71-289a-40d7-bad1-b955dbea75de\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:33Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:33 crc kubenswrapper[4875]: E1126 00:07:33.230752 4875 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.232171 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.232198 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.232205 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.232218 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.232226 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:33Z","lastTransitionTime":"2025-11-26T00:07:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.334943 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.334978 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.334987 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.334999 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.335009 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:33Z","lastTransitionTime":"2025-11-26T00:07:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.438173 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.438247 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.438273 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.438303 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.438330 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:33Z","lastTransitionTime":"2025-11-26T00:07:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.528185 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:07:33 crc kubenswrapper[4875]: E1126 00:07:33.528328 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.528364 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.528474 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:07:33 crc kubenswrapper[4875]: E1126 00:07:33.528558 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:07:33 crc kubenswrapper[4875]: E1126 00:07:33.528657 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.541776 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.541850 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.541874 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.541906 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.541928 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:33Z","lastTransitionTime":"2025-11-26T00:07:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.644810 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.644864 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.644876 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.644893 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.644906 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:33Z","lastTransitionTime":"2025-11-26T00:07:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.711716 4875 generic.go:334] "Generic (PLEG): container finished" podID="02732a7d-257e-4d74-8a08-cea35074fc4e" containerID="ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb" exitCode=0 Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.711781 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" event={"ID":"02732a7d-257e-4d74-8a08-cea35074fc4e","Type":"ContainerDied","Data":"ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb"} Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.716978 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" event={"ID":"8fe08e0b-f339-4beb-953e-6e0180c6c945","Type":"ContainerStarted","Data":"2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14"} Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.727132 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0dde34d63f530305d33e43053c6475f1e1a5a794722700a85758a4d3d3ef5ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:33Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.739799 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5231f747090cf07f92f8f56d3c43191b581098758208e6901b28de04113ab0c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:33Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.747092 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.747141 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.747155 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.747176 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.747191 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:33Z","lastTransitionTime":"2025-11-26T00:07:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.761214 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a19f36bfb94e5e0000f6e930c5160e799d4a3e334d2b119dcc6879fac1a4a98e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9dfa9b8751b0d639d299539f76b3a57c46b730d3960851344162019cb4843f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:33Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.771985 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hjshz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e5a9914-f410-4b51-8e16-77e18f7b09fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a43439ddb6f096b37261ab045b6a3ed923eefecb9c3122b00a87517605d0aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mm8dm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hjshz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:33Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.789215 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8wvqp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e107494e5b30ef73730bc20a4b4ae86c83caf1590f6a8044d1892e424e1559a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22mhs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8wvqp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:33Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.810846 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c771ff97-2ac6-43b4-976e-06fb4909c83b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a0c30f49af86d8affd5039728fe2adf22324b93d617dd5c63dfc73aed0ed70e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f839a108e04ab5f68757b2f5661fe0643d88bbd33493cc800d9c6826d0e69c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef02df42efda33d3f123cfe925bb0d1adab00fecbfd8711962c4bdf458898e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"la
stState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7650f30df5f9f908d3dc068e88e04892dce7a88f63ecc6a52901a36ba7bed5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aac38d8e30fa7e53d316dd4092f7210c7976bd44f77d2a21ab0ed53e1f6c69f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:33Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.823887 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02732a7d-257e-4d74-8a08-cea35074fc4e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni-bincopy whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disa
bled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qvrsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:33Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.838173 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://546875ee0f2f4855eb47134061708606927a379fcc7e1c3492cffcc519bbb542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdf274332d9e071cabf1993c37531ebc66300c965dd736a4795252168efc371\\\",\\\"image\\\":\\\"quay.io/openshift-re
lease-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9mztb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:33Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.849442 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.849486 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.849498 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.849515 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.849527 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:33Z","lastTransitionTime":"2025-11-26T00:07:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.851669 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:33Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.871968 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe08e0b-f339-4beb-953e-6e0180c6c945\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":
\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"rea
dOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28
Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hh8lx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:33Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.884077 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tvsgw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be016325-bd90-4293-9d92-bc8cfba8a463\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fecdc3efe4f0230352d90992e44266359ca7129f3654b43186f6e81bbe72ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97pr5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tvsgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:33Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.894719 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c8a175-7cc7-4e45-8e37-d431994e5526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T00:07:26Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1126 00:07:11.241353 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 00:07:11.243535 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-775517230/tls.crt::/tmp/serving-cert-775517230/tls.key\\\\\\\"\\\\nI1126 00:07:26.715442 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 00:07:26.718387 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 00:07:26.718433 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 00:07:26.718455 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 00:07:26.718462 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 00:07:26.727473 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 00:07:26.727505 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727510 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727513 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 00:07:26.727516 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 00:07:26.727520 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 00:07:26.727524 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 00:07:26.727732 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 00:07:26.730962 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:33Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.908654 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:33Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.919044 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:33Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.935313 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6e20b72-5757-4560-ab3e-61ad3a451be8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b34767b95b984666c42ed51106c7d43766e36ce3f5f4e743d8a40451536f82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dc301396f467d1c7d0af549fa7bf7daf1542a2aa87bf3ece820ce8e21a933d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"
2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9641d3d437fd7125bbf21d140d2e6853aed997e53f3f6be8133ce94c0d4a4653\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5086422d5107d7bba0da4b6f9b3e75966b3ee5833d93e8f5baa87e59f99e4578\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:33Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.951132 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.951173 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.951185 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.951202 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:33 crc kubenswrapper[4875]: I1126 00:07:33.951214 4875 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:33Z","lastTransitionTime":"2025-11-26T00:07:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.054866 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.054937 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.054951 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.054977 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.054992 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:34Z","lastTransitionTime":"2025-11-26T00:07:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.157228 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.157273 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.157281 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.157295 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.157347 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:34Z","lastTransitionTime":"2025-11-26T00:07:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.261297 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.261358 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.261368 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.261384 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.261393 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:34Z","lastTransitionTime":"2025-11-26T00:07:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.368621 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.368687 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.368701 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.368721 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.368735 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:34Z","lastTransitionTime":"2025-11-26T00:07:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.471476 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.471524 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.471533 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.471551 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.471563 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:34Z","lastTransitionTime":"2025-11-26T00:07:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.574931 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.574965 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.574977 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.574991 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.575002 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:34Z","lastTransitionTime":"2025-11-26T00:07:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.677174 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.677254 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.677279 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.677307 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.677326 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:34Z","lastTransitionTime":"2025-11-26T00:07:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.723954 4875 generic.go:334] "Generic (PLEG): container finished" podID="02732a7d-257e-4d74-8a08-cea35074fc4e" containerID="3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92" exitCode=0 Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.723999 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" event={"ID":"02732a7d-257e-4d74-8a08-cea35074fc4e","Type":"ContainerDied","Data":"3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92"} Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.739271 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://546875ee0f2f4855eb47134061708606927a379fcc7e1c3492cffcc519bbb542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdf274332d9e071cabf1993c37531ebc66300c965dd736a4795252168efc371\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt
2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9mztb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:34Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.761651 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02732a7d-257e-4d74-8a08-cea35074fc4e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\"
:\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",
\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qvrsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:34Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.774502 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:34Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.780074 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.780161 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.780187 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.780218 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.780240 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:34Z","lastTransitionTime":"2025-11-26T00:07:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.798390 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe08e0b-f339-4beb-953e-6e0180c6c945\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovn-controller ovn-acl-logging kube-rbac-proxy-node kube-rbac-proxy-ovn-metrics northd nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":fa
lse,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\
\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0
cce6cd11a6ee21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hh8lx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:34Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.810793 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tvsgw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be016325-bd90-4293-9d92-bc8cfba8a463\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fecdc3efe4f0230352d90992e44266359ca7129f3654b43186f6e81bbe72ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97pr5\\\",\\\"readOnly\\\":t
rue,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tvsgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:34Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.825465 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c8a175-7cc7-4e45-8e37-d431994e5526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:0
7:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T00:07:26Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1126 00:07:11.241353 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 00:07:11.243535 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-775517230/tls.crt::/tmp/serving-cert-775517230/tls.key\\\\\\\"\\\\nI1126 00:07:26.715442 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 00:07:26.718387 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 00:07:26.718433 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 00:07:26.718455 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 00:07:26.718462 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 00:07:26.727473 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 00:07:26.727505 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727510 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727513 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 00:07:26.727516 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 00:07:26.727520 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 00:07:26.727524 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 00:07:26.727732 1 
genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 00:07:26.730962 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:34Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.852755 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:34Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.874880 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:34Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.884848 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.884902 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.884917 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.884940 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.884959 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:34Z","lastTransitionTime":"2025-11-26T00:07:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.897813 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6e20b72-5757-4560-ab3e-61ad3a451be8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b34767b95b984666c42ed51106c7d43766e36ce3f5f4e743d8a40451536f82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dc301396f467d1c7d0af549fa7bf7daf1542a2aa87bf3ece820ce8e21a933d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9641d3d437fd7125bbf21d140d2e6853aed997e53f3f6be8133ce94c0d4a4653\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5086422d5107d7bba0da4b6f9b3e75966b3ee5833d93e8f5baa87e59f99e4578\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:34Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.914049 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5231f747090cf07f92f8f56d3c43191b581098758208e6901b28de04113ab0c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:34Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.926993 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a19f36bfb94e5e0000f6e930c5160e799d4a3e334d2b119dcc6879fac1a4a98e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9dfa9b8751b0d639d299539f76b3a57c46b730d3960851344162019cb4843f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:34Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.936204 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hjshz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e5a9914-f410-4b51-8e16-77e18f7b09fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a43439ddb6f096b37261ab045b6a3ed923eefecb9c3122b00a87517605d0aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mm8dm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hjshz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:34Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.947120 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8wvqp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e107494e5b30ef73730bc20a4b4ae86c83caf1590f6a8044d1892e424e1559a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22mhs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8wvqp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:34Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.964163 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c771ff97-2ac6-43b4-976e-06fb4909c83b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a0c30f49af86d8affd5039728fe2adf22324b93d617dd5c63dfc73aed0ed70e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f839a108e04ab5f68757b2f5661fe0643d88bbd33493cc800d9c6826d0e69c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef02df42efda33d3f123cfe925bb0d1adab00fecbfd8711962c4bdf458898e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"la
stState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7650f30df5f9f908d3dc068e88e04892dce7a88f63ecc6a52901a36ba7bed5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aac38d8e30fa7e53d316dd4092f7210c7976bd44f77d2a21ab0ed53e1f6c69f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:34Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.977285 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0dde34d63f530305d33e43053c6475f1e1a5a794722700a85758a4d3d3ef5ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:34Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.987157 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.987197 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.987208 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.987223 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:34 crc kubenswrapper[4875]: I1126 00:07:34.987237 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:34Z","lastTransitionTime":"2025-11-26T00:07:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.077111 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:07:35 crc kubenswrapper[4875]: E1126 00:07:35.077354 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:07:43.077324312 +0000 UTC m=+36.267300027 (durationBeforeRetry 8s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.089913 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.089947 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.089955 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.089969 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.089982 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:35Z","lastTransitionTime":"2025-11-26T00:07:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.178229 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.178274 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.178300 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.178329 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:07:35 crc kubenswrapper[4875]: E1126 00:07:35.178350 4875 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 00:07:35 crc kubenswrapper[4875]: E1126 00:07:35.178447 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 00:07:43.178399512 +0000 UTC m=+36.368375207 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 00:07:35 crc kubenswrapper[4875]: E1126 00:07:35.178460 4875 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 00:07:35 crc kubenswrapper[4875]: E1126 00:07:35.178475 4875 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 00:07:35 crc kubenswrapper[4875]: E1126 00:07:35.178487 4875 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 00:07:35 crc kubenswrapper[4875]: E1126 00:07:35.178529 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 00:07:43.178516465 +0000 UTC m=+36.368492160 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 00:07:35 crc kubenswrapper[4875]: E1126 00:07:35.178577 4875 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 00:07:35 crc kubenswrapper[4875]: E1126 00:07:35.178620 4875 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 00:07:35 crc kubenswrapper[4875]: E1126 00:07:35.178640 4875 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 00:07:35 crc kubenswrapper[4875]: E1126 00:07:35.178727 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 00:07:43.178702729 +0000 UTC m=+36.368678454 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 00:07:35 crc kubenswrapper[4875]: E1126 00:07:35.178853 4875 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 00:07:35 crc kubenswrapper[4875]: E1126 00:07:35.178912 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 00:07:43.178898314 +0000 UTC m=+36.368874049 (durationBeforeRetry 8s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.192391 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.192450 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.192461 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.192475 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.192486 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:35Z","lastTransitionTime":"2025-11-26T00:07:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.295100 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.295130 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.295141 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.295155 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.295164 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:35Z","lastTransitionTime":"2025-11-26T00:07:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.397298 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.397335 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.397348 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.397365 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.397377 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:35Z","lastTransitionTime":"2025-11-26T00:07:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.500294 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.500320 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.500328 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.500341 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.500351 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:35Z","lastTransitionTime":"2025-11-26T00:07:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.529011 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.529025 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:07:35 crc kubenswrapper[4875]: E1126 00:07:35.529122 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.529168 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:07:35 crc kubenswrapper[4875]: E1126 00:07:35.529279 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:07:35 crc kubenswrapper[4875]: E1126 00:07:35.529350 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.603652 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.603706 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.603718 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.603743 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.603755 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:35Z","lastTransitionTime":"2025-11-26T00:07:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.707282 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.707392 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.707460 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.707499 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.707529 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:35Z","lastTransitionTime":"2025-11-26T00:07:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.735231 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" event={"ID":"8fe08e0b-f339-4beb-953e-6e0180c6c945","Type":"ContainerStarted","Data":"59e3981dc1ae71efe429a34fd8a59ad197b3eb65d72be95a2c8a3b8962d4b439"} Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.735755 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.735780 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.739404 4875 generic.go:334] "Generic (PLEG): container finished" podID="02732a7d-257e-4d74-8a08-cea35074fc4e" containerID="9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432" exitCode=0 Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.739515 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" event={"ID":"02732a7d-257e-4d74-8a08-cea35074fc4e","Type":"ContainerDied","Data":"9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432"} Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.749735 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:35Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.764198 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6e20b72-5757-4560-ab3e-61ad3a451be8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b34767b95b984666c42ed51106c7d43766e36ce3f5f4e743d8a40451536f82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dc301396f467d1c7d0af549fa7bf7daf1542a2aa87bf3ece820ce8e21a933d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9641d3d437fd7125bbf21d140d2e6853aed997e53f3f6be8133ce94c0d4a4653\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5086422d5107d7bba0da4b6f9b3e75966b3ee5833d93e8f5baa87e59f99e4578\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:35Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.767709 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.768001 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.778038 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:35Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.796493 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a19f36bfb94e5e0000f6e930c5160e799d4a3e334d2b119dcc6879fac1a4a98e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9dfa9b8751b0d639d299539f76b3a57c46b730d3960851344162019cb4843f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"m
ountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:35Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.809757 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.809804 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.809822 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.809845 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.809859 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:35Z","lastTransitionTime":"2025-11-26T00:07:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.809957 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hjshz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e5a9914-f410-4b51-8e16-77e18f7b09fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a43439ddb6f096b37261ab045b6a3ed923eefecb9c3122b00a87517605d0aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mm8dm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hjshz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:35Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.822035 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8wvqp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e107494e5b30ef73730bc20a4b4ae86c83caf1590f6a8044d1892e424e1559a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22mhs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8wvqp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:35Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.842460 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c771ff97-2ac6-43b4-976e-06fb4909c83b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a0c30f49af86d8affd5039728fe2adf22324b93d617dd5c63dfc73aed0ed70e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f839a108e04ab5f68757b2f5661fe0643d88bbd33493cc800d9c6826d0e69c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef02df42efda33d3f123cfe925bb0d1adab00fecbfd8711962c4bdf458898e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"la
stState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7650f30df5f9f908d3dc068e88e04892dce7a88f63ecc6a52901a36ba7bed5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aac38d8e30fa7e53d316dd4092f7210c7976bd44f77d2a21ab0ed53e1f6c69f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:35Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.856088 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0dde34d63f530305d33e43053c6475f1e1a5a794722700a85758a4d3d3ef5ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:35Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.870969 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5231f747090cf07f92f8f56d3c43191b581098758208e6901b28de04113ab0c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:35Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.883441 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://546875ee0f2f4855eb47134061708606927a379fcc7e1c3492cffcc519bbb542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdf274332d9e071cabf1993c37531ebc66300c965dd736a4795252168efc371\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9mztb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:35Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.900284 4875 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02732a7d-257e-4d74-8a08-cea35074fc4e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with incomplete status: [whereabouts-cni]\\\",\\\"reason\\\":\\\"ContainersNotInitialized\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898
805af7dffaa165ada3cb945a29e657317\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\
\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qvrsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:35Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.913586 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.913639 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.913650 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:35 crc kubenswrapper[4875]: 
I1126 00:07:35.913664 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.913692 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:35Z","lastTransitionTime":"2025-11-26T00:07:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.920280 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe08e0b-f339-4beb-953e-6e0180c6c945\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [nbdb sbdb 
ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59e3981dc1ae71efe429a34fd8a59ad197b3eb65
d72be95a2c8a3b8962d4b439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccou
nt\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hh8lx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:35Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.934123 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tvsgw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"be016325-bd90-4293-9d92-bc8cfba8a463\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fecdc3efe4f0230352d90992e44266359ca7129f3654b43186f6e81bbe72ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97pr5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tvsgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:35Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.951597 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c8a175-7cc7-4e45-8e37-d431994e5526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T00:07:26Z\\\",\\\"message\\\":\\\"espace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1126 00:07:11.241353 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 00:07:11.243535 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-775517230/tls.crt::/tmp/serving-cert-775517230/tls.key\\\\\\\"\\\\nI1126 00:07:26.715442 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 00:07:26.718387 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 00:07:26.718433 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 00:07:26.718455 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 00:07:26.718462 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 00:07:26.727473 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 00:07:26.727505 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727510 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727513 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 00:07:26.727516 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 00:07:26.727520 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 00:07:26.727524 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 00:07:26.727732 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 00:07:26.730962 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:35Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.964787 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:35Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.977145 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://546875ee0f2f4855eb47134061708606927a379fcc7e1c3492cffcc519bbb542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdf274332d9e071cabf1993c37531ebc66300c965dd736a4795252168efc371\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9mztb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:35Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:35 crc kubenswrapper[4875]: I1126 00:07:35.992246 4875 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02732a7d-257e-4d74-8a08-cea35074fc4e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus-additional-cni-plugins]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"PodInitializing\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a168
8df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"
/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qvrsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:35Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.009935 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe08e0b-f339-4beb-953e-6e0180c6c945\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":
\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\
"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59e3981dc1ae71efe429a34fd8a59ad197b3eb65d72be95a2c8a3b8962d4b439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"D
isabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hh8lx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:36Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.016091 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.016123 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.016132 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.016147 4875 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeNotReady" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.016156 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:36Z","lastTransitionTime":"2025-11-26T00:07:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.020139 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tvsgw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be016325-bd90-4293-9d92-bc8cfba8a463\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fecdc3efe4f0230352d90992e44266359ca7129f3654b43186f6e81bbe72ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97pr5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tvsgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:36Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.036559 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c8a175-7cc7-4e45-8e37-d431994e5526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"m
ountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T00:07:26Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1126 00:07:11.241353 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 00:07:11.243535 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-775517230/tls.crt::/tmp/serving-cert-775517230/tls.key\\\\\\\"\\\\nI1126 00:07:26.715442 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 00:07:26.718387 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 00:07:26.718433 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 00:07:26.718455 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 00:07:26.718462 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 00:07:26.727473 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 00:07:26.727505 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727510 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727513 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 00:07:26.727516 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 00:07:26.727520 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 00:07:26.727524 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 00:07:26.727732 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 00:07:26.730962 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:36Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.048781 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:36Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.059688 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:36Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.073080 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6e20b72-5757-4560-ab3e-61ad3a451be8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b34767b95b984666c42ed51106c7d43766e36ce3f5f4e743d8a40451536f82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dc301396f467d1c7d0af549fa7bf7daf1542a2aa87bf3ece820ce8e21a933d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"
2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9641d3d437fd7125bbf21d140d2e6853aed997e53f3f6be8133ce94c0d4a4653\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5086422d5107d7bba0da4b6f9b3e75966b3ee5833d93e8f5baa87e59f99e4578\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:36Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.083725 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:36Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.093490 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a19f36bfb94e5e0000f6e930c5160e799d4a3e334d2b119dcc6879fac1a4a98e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9dfa9b8751b0d639d299539f76b3a57c46b730d3960851344162019cb4843f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:36Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.103544 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hjshz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e5a9914-f410-4b51-8e16-77e18f7b09fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a43439ddb6f096b37261ab045b6a3ed923eefecb9c3122b00a87517605d0aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mm8dm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hjshz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:36Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.118224 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.118281 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.118298 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.118315 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.118342 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:36Z","lastTransitionTime":"2025-11-26T00:07:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.119190 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8wvqp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e107494e5b30ef73730bc20a4b4ae86c83caf1590f6a8044d1892e424e1559a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22mhs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\
\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8wvqp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:36Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.141740 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c771ff97-2ac6-43b4-976e-06fb4909c83b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a0c30f49af86d8affd5039728fe2adf22324b93d617dd5c63dfc73aed0ed70e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f839a108e04ab5f68757b2f5661fe0643d88bbd33493cc800d9c6826d0e69c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef02df42efda33d3f123cfe925bb0d1adab00fecbfd8711962c4bdf458898e1\\\",\\\"image\\\":\\\"quay.io/
openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7650f30df5f9f908d3dc068e88e04892dce7a88f63ecc6a52901a36ba7bed5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aac38d8e30fa7e53d316dd4092f7210c7976bd44f77d2a21ab0ed53e1f6c69f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c687744
1ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:36Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.152997 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0dde34d63f530305d33e43053c6475f1e1a5a794722700a85758a4d3d3ef5ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:36Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.165478 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5231f747090cf07f92f8f56d3c43191b581098758208e6901b28de04113ab0c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:36Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.220199 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.220243 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.220252 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.220267 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.220275 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:36Z","lastTransitionTime":"2025-11-26T00:07:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.322893 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.322938 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.322951 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.322968 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.322980 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:36Z","lastTransitionTime":"2025-11-26T00:07:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.425388 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.425425 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.425433 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.425445 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.425454 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:36Z","lastTransitionTime":"2025-11-26T00:07:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.527552 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.527581 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.527591 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.527603 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.527612 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:36Z","lastTransitionTime":"2025-11-26T00:07:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.629973 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.630021 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.630037 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.630057 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.630072 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:36Z","lastTransitionTime":"2025-11-26T00:07:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.732354 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.732390 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.732401 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.732437 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.732451 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:36Z","lastTransitionTime":"2025-11-26T00:07:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.745467 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" event={"ID":"02732a7d-257e-4d74-8a08-cea35074fc4e","Type":"ContainerStarted","Data":"ab59be8e3f5b8bfceb2d6bdb642d03f2049be71e3acd91d7b5dd155e471f7758"} Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.745534 4875 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.760681 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://546875ee0f2f4855eb47134061708606927a379fcc7e1c3492cffcc519bbb542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdf274332d9e071cabf1993c37531ebc66300c965dd736a4795252168efc371\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\
\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9mztb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:36Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.779988 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02732a7d-257e-4d74-8a08-cea35074fc4e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab59be8e3f5b8bfceb2d6bdb642d03f2049be71e3acd91d7b5dd155e471f7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMoun
ts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sh
a256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.
126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qvrsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:36Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.803884 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe08e0b-f339-4beb-953e-6e0180c6c945\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-l
ib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/
kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59e3981dc1ae71efe429a34fd8a59ad197b3eb65d72be95a2c8a3b8962d4b439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-
openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hh8lx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:36Z is after 
2025-08-24T17:21:41Z" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.815981 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tvsgw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be016325-bd90-4293-9d92-bc8cfba8a463\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fecdc3efe4f0230352d90992e44266359ca7129f3654b43186f6e81bbe72ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97pr5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tvsgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:36Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.833788 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c8a175-7cc7-4e45-8e37-d431994e5526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"message\\\":\\\"containers with unready status: 
[kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\
\":\\\"cri-o://b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T00:07:26Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1126 00:07:11.241353 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 00:07:11.243535 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-775517230/tls.crt::/tmp/serving-cert-775517230/tls.key\\\\\\\"\\\\nI1126 00:07:26.715442 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 00:07:26.718387 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 00:07:26.718433 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 00:07:26.718455 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 00:07:26.718462 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 00:07:26.727473 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 00:07:26.727505 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727510 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727513 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 00:07:26.727516 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 00:07:26.727520 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 00:07:26.727524 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 00:07:26.727732 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 00:07:26.730962 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:36Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.834751 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.834780 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.834792 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.834809 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.834820 4875 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:36Z","lastTransitionTime":"2025-11-26T00:07:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.847649 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:36Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.859218 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:36Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.871141 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6e20b72-5757-4560-ab3e-61ad3a451be8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b34767b95b984666c42ed51106c7d43766e36ce3f5f4e743d8a40451536f82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dc301396f467d1c7d0af549fa7bf7daf1542a2aa87bf3ece820ce8e21a933d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"
2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9641d3d437fd7125bbf21d140d2e6853aed997e53f3f6be8133ce94c0d4a4653\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5086422d5107d7bba0da4b6f9b3e75966b3ee5833d93e8f5baa87e59f99e4578\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:36Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.883894 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:36Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.894278 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a19f36bfb94e5e0000f6e930c5160e799d4a3e334d2b119dcc6879fac1a4a98e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9dfa9b8751b0d639d299539f76b3a57c46b730d3960851344162019cb4843f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:36Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.901988 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hjshz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e5a9914-f410-4b51-8e16-77e18f7b09fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a43439ddb6f096b37261ab045b6a3ed923eefecb9c3122b00a87517605d0aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mm8dm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hjshz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:36Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.914501 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8wvqp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e107494e5b30ef73730bc20a4b4ae86c83caf1590f6a8044d1892e424e1559a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22mhs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8wvqp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:36Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.934209 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c771ff97-2ac6-43b4-976e-06fb4909c83b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a0c30f49af86d8affd5039728fe2adf22324b93d617dd5c63dfc73aed0ed70e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f839a108e04ab5f68757b2f5661fe0643d88bbd33493cc800d9c6826d0e69c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef02df42efda33d3f123cfe925bb0d1adab00fecbfd8711962c4bdf458898e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"la
stState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7650f30df5f9f908d3dc068e88e04892dce7a88f63ecc6a52901a36ba7bed5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aac38d8e30fa7e53d316dd4092f7210c7976bd44f77d2a21ab0ed53e1f6c69f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:36Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.936977 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.937006 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.937020 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.937037 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.937050 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:36Z","lastTransitionTime":"2025-11-26T00:07:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.948267 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0dde34d63f530305d33e43053c6475f1e1a5a794722700a85758a4d3d3ef5ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:36Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:36 crc kubenswrapper[4875]: I1126 00:07:36.961800 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5231f747090cf07f92f8f56d3c43191b581098758208e6901b28de04113ab0c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:36Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.040826 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.040902 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.040921 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.040953 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.040979 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:37Z","lastTransitionTime":"2025-11-26T00:07:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.143384 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.143506 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.143530 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.143573 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.143596 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:37Z","lastTransitionTime":"2025-11-26T00:07:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.247113 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.247182 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.247194 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.247213 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.247225 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:37Z","lastTransitionTime":"2025-11-26T00:07:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.350647 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.350729 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.350753 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.350790 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.350823 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:37Z","lastTransitionTime":"2025-11-26T00:07:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.453364 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.453402 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.453432 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.453450 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.453472 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:37Z","lastTransitionTime":"2025-11-26T00:07:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.528492 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.528559 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:07:37 crc kubenswrapper[4875]: E1126 00:07:37.528630 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:07:37 crc kubenswrapper[4875]: E1126 00:07:37.528674 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.528754 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:07:37 crc kubenswrapper[4875]: E1126 00:07:37.528819 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.542937 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.554696 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.555621 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.555650 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.555659 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.555675 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.555687 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:37Z","lastTransitionTime":"2025-11-26T00:07:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.568946 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6e20b72-5757-4560-ab3e-61ad3a451be8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b34767b95b984666c42ed51106c7d43766e36ce3f5f4e743d8a40451536f82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dc301396f467d1c7d0af549fa7bf7daf1542a2aa87bf3ece820ce8e21a933d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9641d3d437fd7125bbf21d140d2e6853aed997e53f3f6be8133ce94c0d4a4653\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5086422d5107d7bba0da4b6f9b3e75966b3ee5833d93e8f5baa87e59f99e4578\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.582026 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5231f747090cf07f92f8f56d3c43191b581098758208e6901b28de04113ab0c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod 
\"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.597359 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a19f36bfb94e5e0000f6e930c5160e799d4a3e334d2b119dcc6879fac1a4a98e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9dfa9b8751b0d639d299539f76b3a57c46b730d3960851344162019cb4843f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: 
x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.609688 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hjshz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e5a9914-f410-4b51-8e16-77e18f7b09fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a43439ddb6f096b37261ab045b6a3ed923eefecb9c3122b00a87517605d0aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mm8dm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hjshz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.622188 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8wvqp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e107494e5b30ef73730bc20a4b4ae86c83caf1590f6a8044d1892e424e1559a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22mhs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8wvqp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.642087 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c771ff97-2ac6-43b4-976e-06fb4909c83b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a0c30f49af86d8affd5039728fe2adf22324b93d617dd5c63dfc73aed0ed70e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f839a108e04ab5f68757b2f5661fe0643d88bbd33493cc800d9c6826d0e69c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef02df42efda33d3f123cfe925bb0d1adab00fecbfd8711962c4bdf458898e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"la
stState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7650f30df5f9f908d3dc068e88e04892dce7a88f63ecc6a52901a36ba7bed5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aac38d8e30fa7e53d316dd4092f7210c7976bd44f77d2a21ab0ed53e1f6c69f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.657305 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.657345 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.657357 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.657374 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.657385 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:37Z","lastTransitionTime":"2025-11-26T00:07:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.658056 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0dde34d63f530305d33e43053c6475f1e1a5a794722700a85758a4d3d3ef5ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.669645 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://546875ee0f2f4855eb47134061708606927a379fcc7e1c3492cffcc519bbb542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdf274332d9e071cabf1993c37531ebc66300c965dd736a4795252168efc371\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9mztb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.681903 4875 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02732a7d-257e-4d74-8a08-cea35074fc4e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab59be8e3f5b8bfceb2d6bdb642d03f2049be71e3acd91d7b5dd155e471f7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qvrsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.691866 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.706386 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.707527 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe08e0b-f339-4beb-953e-6e0180c6c945\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59e3981dc1ae71efe429a34fd8a59ad197b3eb65d72be95a2c8a3b8962d4b439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPat
h\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hh8lx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.718200 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tvsgw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"be016325-bd90-4293-9d92-bc8cfba8a463\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fecdc3efe4f0230352d90992e44266359ca7129f3654b43186f6e81bbe72ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97pr5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tvsgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.729724 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c8a175-7cc7-4e45-8e37-d431994e5526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"message\\\":\\\"containers with 
unready status: [kube-apiserver-check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T00:07:26Z\\\",\\\"message\\\":\\\"espace): Get 
\\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1126 00:07:11.241353 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 00:07:11.243535 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-775517230/tls.crt::/tmp/serving-cert-775517230/tls.key\\\\\\\"\\\\nI1126 00:07:26.715442 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 00:07:26.718387 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 00:07:26.718433 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 00:07:26.718455 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 00:07:26.718462 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 00:07:26.727473 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 00:07:26.727505 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727510 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727513 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 00:07:26.727516 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 00:07:26.727520 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 00:07:26.727524 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 00:07:26.727732 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 00:07:26.730962 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.741069 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6e20b72-5757-4560-ab3e-61ad3a451be8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b34767b95b984666c42ed51106c7d43766e36ce3f5f4e743d8a40451536f82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dc301396f467d1c7d0af549fa7bf7daf1542a2aa87bf3ece820ce8e21a933d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9641d3d437fd7125bbf21d140d2e6853aed997e53f3f6be8133ce94c0d4a4653\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5086422d5107d7bba0da4b6f9b3e75966b3ee5833d93e8f5baa87e59f99e4578\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.748184 4875 prober_manager.go:312] "Failed to trigger a manual run" probe="Readiness" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.751687 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.758919 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.758946 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.758955 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.758969 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.758979 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:37Z","lastTransitionTime":"2025-11-26T00:07:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.762171 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.784045 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c771ff97-2ac6-43b4-976e-06fb4909c83b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a0c30f49af86d8affd5039728fe2adf22324b93d617dd5c63dfc73aed0ed70e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f839a108e04ab5f68757b2f5661fe0643d88bbd33493cc800d9c6826d0e69c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef02df42efda33d3f123cfe925bb0d1adab00fecbfd8711962c4bdf458898e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7650f30df5f9f908d3dc068e88e04892dce7a8
8f63ecc6a52901a36ba7bed5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aac38d8e30fa7e53d316dd4092f7210c7976bd44f77d2a21ab0ed53e1f6c69f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.797375 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0dde34d63f530305d33e43053c6475f1e1a5a794722700a85758a4d3d3ef5ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.809154 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5231f747090cf07f92f8f56d3c43191b581098758208e6901b28de04113ab0c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.821161 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a19f36bfb94e5e0000f6e930c5160e799d4a3e334d2b119dcc6879fac1a4a98e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9dfa9b8751b0d639d299539f76b3a57c46b730d3960851344162019cb4843f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.833056 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hjshz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e5a9914-f410-4b51-8e16-77e18f7b09fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a43439ddb6f096b37261ab045b6a3ed923eefecb9c3122b00a87517605d0aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mm8dm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hjshz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.845161 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8wvqp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e107494e5b30ef73730bc20a4b4ae86c83caf1590f6a8044d1892e424e1559a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22mhs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8wvqp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.855872 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://546875ee0f2f4855eb47134061708606927a379fcc7e1c3492cffcc519bbb542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdf274332d9e071cabf1993c37531ebc66300c965dd736a4795252168efc371\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-9mztb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.860993 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.861038 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.861054 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.861074 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.861090 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:37Z","lastTransitionTime":"2025-11-26T00:07:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.868121 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"02732a7d-257e-4d74-8a08-cea35074fc4e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab59be8e3f5b8bfceb2d6bdb642d03f2049be71e3acd91d7b5dd155e471f7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qvrsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.881733 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c8a175-7cc7-4e45-8e37-d431994e5526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T00:07:26Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1126 00:07:11.241353 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 00:07:11.243535 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-775517230/tls.crt::/tmp/serving-cert-775517230/tls.key\\\\\\\"\\\\nI1126 00:07:26.715442 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 00:07:26.718387 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 00:07:26.718433 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 00:07:26.718455 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 00:07:26.718462 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 00:07:26.727473 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 00:07:26.727505 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727510 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727513 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 00:07:26.727516 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 00:07:26.727520 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 00:07:26.727524 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 00:07:26.727732 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 00:07:26.730962 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.894320 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.917267 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe08e0b-f339-4beb-953e-6e0180c6c945\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59e3981dc1ae71efe429a34fd8a59ad197b3eb65
d72be95a2c8a3b8962d4b439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccoun
t\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hh8lx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.930019 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tvsgw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"be016325-bd90-4293-9d92-bc8cfba8a463\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fecdc3efe4f0230352d90992e44266359ca7129f3654b43186f6e81bbe72ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97pr5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tvsgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.963129 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.963179 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.963190 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.963207 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:37 crc kubenswrapper[4875]: I1126 00:07:37.963219 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:37Z","lastTransitionTime":"2025-11-26T00:07:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.065358 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.065391 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.065401 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.065432 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.065444 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:38Z","lastTransitionTime":"2025-11-26T00:07:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.167975 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.168014 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.168027 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.168043 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.168054 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:38Z","lastTransitionTime":"2025-11-26T00:07:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.270663 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.270708 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.270718 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.270736 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.270748 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:38Z","lastTransitionTime":"2025-11-26T00:07:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.373126 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.373174 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.373185 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.373200 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.373213 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:38Z","lastTransitionTime":"2025-11-26T00:07:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.476071 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.476124 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.476133 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.476152 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.476162 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:38Z","lastTransitionTime":"2025-11-26T00:07:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.578989 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.579057 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.579081 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.579113 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.579137 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:38Z","lastTransitionTime":"2025-11-26T00:07:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.682836 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.682915 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.682939 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.682971 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.682998 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:38Z","lastTransitionTime":"2025-11-26T00:07:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.753322 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hh8lx_8fe08e0b-f339-4beb-953e-6e0180c6c945/ovnkube-controller/0.log" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.756658 4875 generic.go:334] "Generic (PLEG): container finished" podID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerID="59e3981dc1ae71efe429a34fd8a59ad197b3eb65d72be95a2c8a3b8962d4b439" exitCode=1 Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.756696 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" event={"ID":"8fe08e0b-f339-4beb-953e-6e0180c6c945","Type":"ContainerDied","Data":"59e3981dc1ae71efe429a34fd8a59ad197b3eb65d72be95a2c8a3b8962d4b439"} Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.757319 4875 scope.go:117] "RemoveContainer" containerID="59e3981dc1ae71efe429a34fd8a59ad197b3eb65d72be95a2c8a3b8962d4b439" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.772943 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://546875ee0f2f4855eb47134061708606927a379fcc7e1c3492cffcc519bbb542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdf274332d9e071cabf1993c37531ebc66300c965dd736a4795252168efc371\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\
",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9mztb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:38Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.785721 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.785755 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.785769 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.785785 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.785797 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:38Z","lastTransitionTime":"2025-11-26T00:07:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.793641 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02732a7d-257e-4d74-8a08-cea35074fc4e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab59be8e3f5b8bfceb2d6bdb642d03f2049be71e3acd91d7b5dd155e471f7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\
\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"
mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qvrsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:38Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.808983 4875 status_manager.go:875] "Failed to update status for pod" 
pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c8a175-7cc7-4e45-8e37-d431994e5526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5\\
\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T00:07:26Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1126 00:07:11.241353 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 00:07:11.243535 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-775517230/tls.crt::/tmp/serving-cert-775517230/tls.key\\\\\\\"\\\\nI1126 00:07:26.715442 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 00:07:26.718387 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 00:07:26.718433 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 00:07:26.718455 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 00:07:26.718462 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 00:07:26.727473 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 00:07:26.727505 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727510 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727513 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 00:07:26.727516 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 00:07:26.727520 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 00:07:26.727524 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 00:07:26.727732 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 00:07:26.730962 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:38Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.821162 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:38Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.837907 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe08e0b-f339-4beb-953e-6e0180c6c945\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://59e3981dc1ae71efe429a34fd8a59ad197b3eb65
d72be95a2c8a3b8962d4b439\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59e3981dc1ae71efe429a34fd8a59ad197b3eb65d72be95a2c8a3b8962d4b439\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T00:07:38Z\\\",\\\"message\\\":\\\"s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 00:07:38.275187 6147 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1126 00:07:38.275274 6147 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 00:07:38.275699 6147 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 00:07:38.276055 6147 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 00:07:38.276076 6147 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1126 00:07:38.276090 6147 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 00:07:38.276099 6147 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 00:07:38.276112 6147 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 00:07:38.276144 6147 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1126 00:07:38.276193 6147 factory.go:656] Stopping watch factory\\\\nI1126 00:07:38.276197 6147 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 00:07:38.276209 6147 ovnkube.go:599] Stopped ovnkube\\\\nI1126 
0\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d209
9482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hh8lx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:38Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.847583 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tvsgw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be016325-bd90-4293-9d92-bc8cfba8a463\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fecdc3efe4f0230352d90992e44266359ca7129f3654b43186f6e81bbe72ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97pr5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126
.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tvsgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:38Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.860866 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6e20b72-5757-4560-ab3e-61ad3a451be8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b34767b95b984666c42ed51106c7d43766e36ce3f5f4e743d8a40451536f82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dc301396f467d1c7d0af549fa7bf7daf1542a2aa87bf3ece820ce8e21a933d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9641d3d437fd7125bbf21d140d2e6853aed997e53f3f6be8133ce94c0d4a4653\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2
b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5086422d5107d7bba0da4b6f9b3e75966b3ee5833d93e8f5baa87e59f99e4578\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:38Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.872606 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:38Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.888111 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.888146 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.888154 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.888168 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.888179 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:38Z","lastTransitionTime":"2025-11-26T00:07:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.891925 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:38Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.910237 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c771ff97-2ac6-43b4-976e-06fb4909c83b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a0c30f49af86d8affd5039728fe2adf22324b93d617dd5c63dfc73aed0ed70e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f839a108e04ab5f68757b2f5661fe0643d88bbd33493cc800d9c6826d0e69c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef02df42efda33d3f123cfe925bb0d1adab00fecbfd8711962c4bdf458898e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7650f30df5f9f908d3dc068e88e04892dce7a8
8f63ecc6a52901a36ba7bed5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aac38d8e30fa7e53d316dd4092f7210c7976bd44f77d2a21ab0ed53e1f6c69f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:38Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.923926 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0dde34d63f530305d33e43053c6475f1e1a5a794722700a85758a4d3d3ef5ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:38Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.933885 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5231f747090cf07f92f8f56d3c43191b581098758208e6901b28de04113ab0c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:38Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.945337 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a19f36bfb94e5e0000f6e930c5160e799d4a3e334d2b119dcc6879fac1a4a98e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9dfa9b8751b0d639d299539f76b3a57c46b730d3960851344162019cb4843f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:38Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.954671 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hjshz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e5a9914-f410-4b51-8e16-77e18f7b09fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a43439ddb6f096b37261ab045b6a3ed923eefecb9c3122b00a87517605d0aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mm8dm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hjshz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:38Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.965449 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8wvqp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e107494e5b30ef73730bc20a4b4ae86c83caf1590f6a8044d1892e424e1559a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22mhs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8wvqp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:38Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.989826 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.989878 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.989886 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.989899 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:38 crc kubenswrapper[4875]: I1126 00:07:38.989908 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:38Z","lastTransitionTime":"2025-11-26T00:07:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.092599 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.092650 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.092663 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.092679 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.092692 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:39Z","lastTransitionTime":"2025-11-26T00:07:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.197881 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.197925 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.197937 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.197996 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.198009 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:39Z","lastTransitionTime":"2025-11-26T00:07:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.299969 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.300000 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.300008 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.300021 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.300030 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:39Z","lastTransitionTime":"2025-11-26T00:07:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.402368 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.402406 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.402443 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.402459 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.402470 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:39Z","lastTransitionTime":"2025-11-26T00:07:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.504650 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.504687 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.504695 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.504714 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.504724 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:39Z","lastTransitionTime":"2025-11-26T00:07:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.528388 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.528489 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:07:39 crc kubenswrapper[4875]: E1126 00:07:39.528515 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.528389 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:07:39 crc kubenswrapper[4875]: E1126 00:07:39.528614 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:07:39 crc kubenswrapper[4875]: E1126 00:07:39.528729 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.607376 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.607429 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.607440 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.607455 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.607465 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:39Z","lastTransitionTime":"2025-11-26T00:07:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.710061 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.710098 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.710106 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.710120 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.710128 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:39Z","lastTransitionTime":"2025-11-26T00:07:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.761520 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hh8lx_8fe08e0b-f339-4beb-953e-6e0180c6c945/ovnkube-controller/1.log" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.762473 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hh8lx_8fe08e0b-f339-4beb-953e-6e0180c6c945/ovnkube-controller/0.log" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.769480 4875 generic.go:334] "Generic (PLEG): container finished" podID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerID="325d860516ab39c35065943a22c5723f6be24442fa42871dd750aaca6f6e4c64" exitCode=1 Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.769536 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" event={"ID":"8fe08e0b-f339-4beb-953e-6e0180c6c945","Type":"ContainerDied","Data":"325d860516ab39c35065943a22c5723f6be24442fa42871dd750aaca6f6e4c64"} Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.769586 4875 scope.go:117] "RemoveContainer" containerID="59e3981dc1ae71efe429a34fd8a59ad197b3eb65d72be95a2c8a3b8962d4b439" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.770368 4875 scope.go:117] "RemoveContainer" containerID="325d860516ab39c35065943a22c5723f6be24442fa42871dd750aaca6f6e4c64" Nov 26 00:07:39 crc kubenswrapper[4875]: E1126 00:07:39.770578 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-hh8lx_openshift-ovn-kubernetes(8fe08e0b-f339-4beb-953e-6e0180c6c945)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.782263 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:39Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.795401 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6e20b72-5757-4560-ab3e-61ad3a451be8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b34767b95b984666c42ed51106c7d43766e36ce3f5f4e743d8a40451536f82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dc301396f467d1c7d0af549fa7bf7daf1542a2aa87bf3ece820ce8e21a933d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"
2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9641d3d437fd7125bbf21d140d2e6853aed997e53f3f6be8133ce94c0d4a4653\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5086422d5107d7bba0da4b6f9b3e75966b3ee5833d93e8f5baa87e59f99e4578\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:39Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.808891 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:39Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.812501 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.812524 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.812532 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.812545 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.812556 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:39Z","lastTransitionTime":"2025-11-26T00:07:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.821393 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a19f36bfb94e5e0000f6e930c5160e799d4a3e334d2b119dcc6879fac1a4a98e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9dfa9b8751b0d639d299539f76b3a57c46b730d3960851344162019cb4843f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:39Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.832654 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hjshz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e5a9914-f410-4b51-8e16-77e18f7b09fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a43439ddb6f096b37261ab045b6a3ed923eefecb9c3122b00a87517605d0aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mm8dm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hjshz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:39Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.845452 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8wvqp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e107494e5b30ef73730bc20a4b4ae86c83caf1590f6a8044d1892e424e1559a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22mhs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8wvqp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:39Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.863553 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c771ff97-2ac6-43b4-976e-06fb4909c83b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a0c30f49af86d8affd5039728fe2adf22324b93d617dd5c63dfc73aed0ed70e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f839a108e04ab5f68757b2f5661fe0643d88bbd33493cc800d9c6826d0e69c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef02df42efda33d3f123cfe925bb0d1adab00fecbfd8711962c4bdf458898e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"la
stState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7650f30df5f9f908d3dc068e88e04892dce7a88f63ecc6a52901a36ba7bed5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aac38d8e30fa7e53d316dd4092f7210c7976bd44f77d2a21ab0ed53e1f6c69f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:39Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.873528 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.876806 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0dde34d63f530305d33e43053c6475f1e1a5a794722700a85758a4d3d3ef5ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:39Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.889624 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5231f747090cf07f92f8f56d3c43191b581098758208e6901b28de04113ab0c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:39Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.901454 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://546875ee0f2f4855eb47134061708606927a379fcc7e1c3492cffcc519bbb542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdf274332d9e071cabf1993c37531ebc66300c965dd736a4795252168efc371\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9mztb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:39Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.915190 4875 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.915242 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.915257 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.915279 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.915295 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:39Z","lastTransitionTime":"2025-11-26T00:07:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.918947 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02732a7d-257e-4d74-8a08-cea35074fc4e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab59be8e3f5b8bfceb2d6bdb642d03f2049be71e3acd91d7b5dd155e471f7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2
c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/
secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:34Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qvrsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:39Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.938255 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe08e0b-f339-4beb-953e-6e0180c6c945\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://325d860516ab39c35065943a22c5723f6be24442
fa42871dd750aaca6f6e4c64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59e3981dc1ae71efe429a34fd8a59ad197b3eb65d72be95a2c8a3b8962d4b439\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T00:07:38Z\\\",\\\"message\\\":\\\"s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 00:07:38.275187 6147 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1126 00:07:38.275274 6147 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 00:07:38.275699 6147 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 00:07:38.276055 6147 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 00:07:38.276076 6147 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1126 00:07:38.276090 6147 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 00:07:38.276099 6147 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 00:07:38.276112 6147 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 00:07:38.276144 6147 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1126 00:07:38.276193 6147 factory.go:656] Stopping watch factory\\\\nI1126 00:07:38.276197 6147 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 00:07:38.276209 6147 ovnkube.go:599] Stopped ovnkube\\\\nI1126 0\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:35Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://325d860516ab39c35065943a22c5723f6be24442fa42871dd750aaca6f6e4c64\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T00:07:39Z\\\",\\\"message\\\":\\\" for Pod openshift-etcd/etcd-crc in node crc\\\\nI1126 00:07:39.637084 6273 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:61897e97-c771-4738-8709-09636387cb00}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1126 00:07:39.637120 6273 obj_retry.go:386] Retry successful for *v1.Pod openshift-etcd/etcd-crc after 0 failed attempt(s)\\\\nI1126 00:07:39.637132 6273 default_network_controller.go:776] Recording success event on pod openshift-etcd/etcd-crc\\\\nF1126 00:07:39.637149 6273 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start 
default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/no\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126
.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hh8lx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:39Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.951089 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tvsgw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"be016325-bd90-4293-9d92-bc8cfba8a463\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fecdc3efe4f0230352d90992e44266359ca7129f3654b43186f6e81bbe72ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97pr5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tvsgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:39Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.967684 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c8a175-7cc7-4e45-8e37-d431994e5526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T00:07:26Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1126 00:07:11.241353 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 00:07:11.243535 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-775517230/tls.crt::/tmp/serving-cert-775517230/tls.key\\\\\\\"\\\\nI1126 00:07:26.715442 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 00:07:26.718387 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 00:07:26.718433 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 00:07:26.718455 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 00:07:26.718462 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 00:07:26.727473 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 00:07:26.727505 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727510 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727513 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 00:07:26.727516 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 00:07:26.727520 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 00:07:26.727524 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 00:07:26.727732 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 00:07:26.730962 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:39Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:39 crc kubenswrapper[4875]: I1126 00:07:39.984029 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:39Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.018512 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.018562 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.018575 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.018593 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.018605 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:40Z","lastTransitionTime":"2025-11-26T00:07:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.120654 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.120686 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.120694 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.120707 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.120715 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:40Z","lastTransitionTime":"2025-11-26T00:07:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.222852 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.222911 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.222922 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.222941 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.222953 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:40Z","lastTransitionTime":"2025-11-26T00:07:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.325660 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.325737 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.325755 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.325777 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.325792 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:40Z","lastTransitionTime":"2025-11-26T00:07:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.428264 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.428335 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.428353 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.428378 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.428396 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:40Z","lastTransitionTime":"2025-11-26T00:07:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.504331 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qhmvp"] Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.505021 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qhmvp" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.506605 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.507056 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.530507 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"02732a7d-257e-4d74-8a08-cea35074fc4e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab59be8e3f5b8bfceb2d6bdb642d03f2049be71e3acd91d7b5dd155e471f7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qvrsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:40Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.530892 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.530922 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:40 crc 
kubenswrapper[4875]: I1126 00:07:40.530936 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.530950 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.530962 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:40Z","lastTransitionTime":"2025-11-26T00:07:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.543752 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qhmvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"40b82738-34d2-4c2f-99c3-ae1198e964dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6mk2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6mk2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:40Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qhmvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:40Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.556435 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://546875ee0f2f4855eb47134061708606927a379fcc7e1c3492cffcc519bbb542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdf274332d9e071cabf1993c37531ebc66300c965dd736a4795252168efc371\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9mztb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:40Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.569631 4875 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:40Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.589586 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe08e0b-f339-4beb-953e-6e0180c6c945\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://325d860516ab39c35065943a22c5723f6be24442
fa42871dd750aaca6f6e4c64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://59e3981dc1ae71efe429a34fd8a59ad197b3eb65d72be95a2c8a3b8962d4b439\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T00:07:38Z\\\",\\\"message\\\":\\\"s) from github.com/openshift/ovn-kubernetes/go-controller/pkg/crd/egressservice/v1/apis/informers/externalversions/factory.go:140\\\\nI1126 00:07:38.275187 6147 reflector.go:311] Stopping reflector *v1alpha1.BaselineAdminNetworkPolicy (0s) from sigs.k8s.io/network-policy-api/pkg/client/informers/externalversions/factory.go:141\\\\nI1126 00:07:38.275274 6147 reflector.go:311] Stopping reflector *v1.EndpointSlice (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 00:07:38.275699 6147 reflector.go:311] Stopping reflector *v1.NetworkPolicy (0s) from k8s.io/client-go/informers/factory.go:160\\\\nI1126 00:07:38.276055 6147 handler.go:190] Sending *v1.Node event handler 2 for removal\\\\nI1126 00:07:38.276076 6147 handler.go:190] Sending *v1.Node event handler 7 for removal\\\\nI1126 00:07:38.276090 6147 handler.go:190] Sending *v1.EgressFirewall event handler 9 for removal\\\\nI1126 00:07:38.276099 6147 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 00:07:38.276112 6147 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 00:07:38.276144 6147 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1126 00:07:38.276193 6147 factory.go:656] Stopping watch factory\\\\nI1126 00:07:38.276197 6147 handler.go:208] Removed *v1.EgressFirewall event handler 9\\\\nI1126 00:07:38.276209 6147 ovnkube.go:599] Stopped ovnkube\\\\nI1126 0\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:35Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://325d860516ab39c35065943a22c5723f6be24442fa42871dd750aaca6f6e4c64\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T00:07:39Z\\\",\\\"message\\\":\\\" for Pod openshift-etcd/etcd-crc in node crc\\\\nI1126 00:07:39.637084 6273 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:61897e97-c771-4738-8709-09636387cb00}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1126 00:07:39.637120 6273 obj_retry.go:386] Retry successful for *v1.Pod openshift-etcd/etcd-crc after 0 failed attempt(s)\\\\nI1126 00:07:39.637132 6273 default_network_controller.go:776] Recording success event on pod openshift-etcd/etcd-crc\\\\nF1126 00:07:39.637149 6273 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start 
default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/no\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:38Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126
.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hh8lx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:40Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.600335 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tvsgw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"be016325-bd90-4293-9d92-bc8cfba8a463\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fecdc3efe4f0230352d90992e44266359ca7129f3654b43186f6e81bbe72ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97pr5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tvsgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:40Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.613702 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c8a175-7cc7-4e45-8e37-d431994e5526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T00:07:26Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1126 00:07:11.241353 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 00:07:11.243535 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-775517230/tls.crt::/tmp/serving-cert-775517230/tls.key\\\\\\\"\\\\nI1126 00:07:26.715442 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 00:07:26.718387 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 00:07:26.718433 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 00:07:26.718455 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 00:07:26.718462 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 00:07:26.727473 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 00:07:26.727505 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727510 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727513 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 00:07:26.727516 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 00:07:26.727520 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 00:07:26.727524 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 00:07:26.727732 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 00:07:26.730962 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:40Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.630940 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:40Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.632767 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.632801 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.632810 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.632825 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.632834 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:40Z","lastTransitionTime":"2025-11-26T00:07:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.636476 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s6mk2\" (UniqueName: \"kubernetes.io/projected/40b82738-34d2-4c2f-99c3-ae1198e964dc-kube-api-access-s6mk2\") pod \"ovnkube-control-plane-749d76644c-qhmvp\" (UID: \"40b82738-34d2-4c2f-99c3-ae1198e964dc\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qhmvp" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.636570 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/40b82738-34d2-4c2f-99c3-ae1198e964dc-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-qhmvp\" (UID: \"40b82738-34d2-4c2f-99c3-ae1198e964dc\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qhmvp" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.636611 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/40b82738-34d2-4c2f-99c3-ae1198e964dc-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-qhmvp\" (UID: \"40b82738-34d2-4c2f-99c3-ae1198e964dc\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qhmvp" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.636667 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/40b82738-34d2-4c2f-99c3-ae1198e964dc-env-overrides\") pod \"ovnkube-control-plane-749d76644c-qhmvp\" (UID: \"40b82738-34d2-4c2f-99c3-ae1198e964dc\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qhmvp" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.663163 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:40Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.685469 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6e20b72-5757-4560-ab3e-61ad3a451be8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b34767b95b984666c42ed51106c7d43766e36ce3f5f4e743d8a40451536f82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dc301396f467d1c7d0af549fa7bf7daf1542a2aa87bf3ece820ce8e21a933d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"
2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9641d3d437fd7125bbf21d140d2e6853aed997e53f3f6be8133ce94c0d4a4653\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5086422d5107d7bba0da4b6f9b3e75966b3ee5833d93e8f5baa87e59f99e4578\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:40Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.699372 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0dde34d63f530305d33e43053c6475f1e1a5a794722700a85758a4d3d3ef5ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:40Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.711994 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5231f747090cf07f92f8f56d3c43191b581098758208e6901b28de04113ab0c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:40Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.730065 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a19f36bfb94e5e0000f6e930c5160e799d4a3e334d2b119dcc6879fac1a4a98e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9dfa9b8751b0d639d299539f76b3a57c46b730d3960851344162019cb4843f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:40Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.734794 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.734867 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.734888 4875 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.734914 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.734931 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:40Z","lastTransitionTime":"2025-11-26T00:07:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.737142 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/40b82738-34d2-4c2f-99c3-ae1198e964dc-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-qhmvp\" (UID: \"40b82738-34d2-4c2f-99c3-ae1198e964dc\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qhmvp" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.737195 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/40b82738-34d2-4c2f-99c3-ae1198e964dc-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-qhmvp\" (UID: \"40b82738-34d2-4c2f-99c3-ae1198e964dc\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qhmvp" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.737243 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/40b82738-34d2-4c2f-99c3-ae1198e964dc-env-overrides\") pod \"ovnkube-control-plane-749d76644c-qhmvp\" (UID: \"40b82738-34d2-4c2f-99c3-ae1198e964dc\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qhmvp" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.737299 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s6mk2\" (UniqueName: \"kubernetes.io/projected/40b82738-34d2-4c2f-99c3-ae1198e964dc-kube-api-access-s6mk2\") pod \"ovnkube-control-plane-749d76644c-qhmvp\" (UID: \"40b82738-34d2-4c2f-99c3-ae1198e964dc\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qhmvp" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.737860 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/40b82738-34d2-4c2f-99c3-ae1198e964dc-ovnkube-config\") pod \"ovnkube-control-plane-749d76644c-qhmvp\" (UID: \"40b82738-34d2-4c2f-99c3-ae1198e964dc\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qhmvp" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.737872 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/40b82738-34d2-4c2f-99c3-ae1198e964dc-env-overrides\") pod \"ovnkube-control-plane-749d76644c-qhmvp\" (UID: \"40b82738-34d2-4c2f-99c3-ae1198e964dc\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qhmvp" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.740646 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hjshz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e5a9914-f410-4b51-8e16-77e18f7b09fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a43439ddb6f096b37261ab045b6a3ed923eefecb9c3122b00a87517605d0aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mm8dm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hjshz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:40Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.742210 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-control-plane-metrics-cert\" (UniqueName: \"kubernetes.io/secret/40b82738-34d2-4c2f-99c3-ae1198e964dc-ovn-control-plane-metrics-cert\") pod \"ovnkube-control-plane-749d76644c-qhmvp\" (UID: \"40b82738-34d2-4c2f-99c3-ae1198e964dc\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qhmvp" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.752282 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s6mk2\" (UniqueName: \"kubernetes.io/projected/40b82738-34d2-4c2f-99c3-ae1198e964dc-kube-api-access-s6mk2\") pod \"ovnkube-control-plane-749d76644c-qhmvp\" (UID: \"40b82738-34d2-4c2f-99c3-ae1198e964dc\") " pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qhmvp" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.755154 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8wvqp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e107494e5b30ef73730bc20a4b4ae86c83caf1590f6a8044d1892e424e1559a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22mhs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8wvqp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:40Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.773643 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c771ff97-2ac6-43b4-976e-06fb4909c83b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a0c30f49af86d8affd5039728fe2adf22324b93d617dd5c63dfc73aed0ed70e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f839a108e04ab5f68757b2f5661fe0643d88bbd33493cc800d9c6826d0e69c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef02df42efda33d3f123cfe925bb0d1adab00fecbfd8711962c4bdf458898e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"la
stState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7650f30df5f9f908d3dc068e88e04892dce7a88f63ecc6a52901a36ba7bed5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aac38d8e30fa7e53d316dd4092f7210c7976bd44f77d2a21ab0ed53e1f6c69f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:40Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.774311 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hh8lx_8fe08e0b-f339-4beb-953e-6e0180c6c945/ovnkube-controller/1.log" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.777918 4875 scope.go:117] "RemoveContainer" containerID="325d860516ab39c35065943a22c5723f6be24442fa42871dd750aaca6f6e4c64" Nov 26 00:07:40 crc kubenswrapper[4875]: E1126 00:07:40.778078 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-hh8lx_openshift-ovn-kubernetes(8fe08e0b-f339-4beb-953e-6e0180c6c945)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.797168 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c771ff97-2ac6-43b4-976e-06fb4909c83b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a0c30f49af86d8affd5039728fe2adf22324b93d617dd5c63dfc73aed0ed70e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f839a108e04ab5f68757b2f5661fe0643d88bbd33493cc800d9c6826d0e69c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef02df42efda33d3f123cfe925bb0d1adab00fecbfd8711962c4bdf458898e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7650f30df5f9f908d3dc068e88e04892dce7a8
8f63ecc6a52901a36ba7bed5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aac38d8e30fa7e53d316dd4092f7210c7976bd44f77d2a21ab0ed53e1f6c69f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:40Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.809759 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0dde34d63f530305d33e43053c6475f1e1a5a794722700a85758a4d3d3ef5ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:40Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.822184 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5231f747090cf07f92f8f56d3c43191b581098758208e6901b28de04113ab0c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:40Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.823337 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qhmvp" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.836683 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a19f36bfb94e5e0000f6e930c5160e799d4a3e334d2b119dcc6879fac1a4a98e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9dfa9b8751b0d639d299539f76b3a57c46b730d3960851344162019cb4843f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:40Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:40 crc kubenswrapper[4875]: W1126 00:07:40.837034 4875 manager.go:1169] Failed to process watch event {EventType:0 
Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod40b82738_34d2_4c2f_99c3_ae1198e964dc.slice/crio-b94c2e20f71dfc57d821562d83acc57ad135e3d330aaa63c2cddbb54099bffb3 WatchSource:0}: Error finding container b94c2e20f71dfc57d821562d83acc57ad135e3d330aaa63c2cddbb54099bffb3: Status 404 returned error can't find the container with id b94c2e20f71dfc57d821562d83acc57ad135e3d330aaa63c2cddbb54099bffb3 Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.837473 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.837503 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.837517 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.837533 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.837547 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:40Z","lastTransitionTime":"2025-11-26T00:07:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.851250 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hjshz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e5a9914-f410-4b51-8e16-77e18f7b09fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a43439ddb6f096b37261ab045b6a3ed923eefecb9c3122b00a87517605d0aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mm8dm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hjshz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:40Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.862140 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8wvqp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e107494e5b30ef73730bc20a4b4ae86c83caf1590f6a8044d1892e424e1559a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22mhs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8wvqp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:40Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.873109 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://546875ee0f2f4855eb47134061708606927a379fcc7e1c3492cffcc519bbb542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdf274332d9e071cabf1993c37531ebc66300c965dd736a4795252168efc371\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-9mztb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:40Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.886603 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02732a7d-257e-4d74-8a08-cea35074fc4e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab59be8e3f5b8bfceb2d6bdb642d03f2049be71e3acd91d7b5dd155e471f7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\
",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"
containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qvrsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:40Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.897781 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qhmvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"40b82738-34d2-4c2f-99c3-ae1198e964dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6mk2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6mk2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:40Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qhmvp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:40Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.910166 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c8a175-7cc7-4e45-8e37-d431994e5526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T00:07:26Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1126 00:07:11.241353 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 00:07:11.243535 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-775517230/tls.crt::/tmp/serving-cert-775517230/tls.key\\\\\\\"\\\\nI1126 00:07:26.715442 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 00:07:26.718387 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 00:07:26.718433 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 00:07:26.718455 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 00:07:26.718462 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 00:07:26.727473 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 00:07:26.727505 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727510 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727513 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 00:07:26.727516 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 00:07:26.727520 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 00:07:26.727524 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 00:07:26.727732 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 00:07:26.730962 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:40Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.921783 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:40Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.937998 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe08e0b-f339-4beb-953e-6e0180c6c945\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://325d860516ab39c35065943a22c5723f6be24442
fa42871dd750aaca6f6e4c64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://325d860516ab39c35065943a22c5723f6be24442fa42871dd750aaca6f6e4c64\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T00:07:39Z\\\",\\\"message\\\":\\\" for Pod openshift-etcd/etcd-crc in node crc\\\\nI1126 00:07:39.637084 6273 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:61897e97-c771-4738-8709-09636387cb00}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1126 00:07:39.637120 6273 obj_retry.go:386] Retry successful for *v1.Pod openshift-etcd/etcd-crc after 0 failed attempt(s)\\\\nI1126 00:07:39.637132 6273 default_network_controller.go:776] Recording success event on pod openshift-etcd/etcd-crc\\\\nF1126 00:07:39.637149 6273 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/no\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:38Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-hh8lx_openshift-ovn-kubernetes(8fe08e0b-f339-4beb-953e-6e0180c6c945)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hh8lx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:40Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.940037 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.940070 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.940079 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.940092 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.940101 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:40Z","lastTransitionTime":"2025-11-26T00:07:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.946988 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tvsgw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be016325-bd90-4293-9d92-bc8cfba8a463\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fecdc3efe4f0230352d90992e44266359ca7129f3654b43186f6e81bbe72ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97pr5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tvsgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:40Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.957751 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6e20b72-5757-4560-ab3e-61ad3a451be8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b34767b95b984666c42ed51106c7d43766e36ce3f5f4e743d8a40451536f82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dc301396f467d1c7d0af549fa7bf7daf1542a2aa87bf3ece820ce8e21a933d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9641d3d437fd7125bbf21d140d2e6853aed997e53f3f6be8133ce94c0d4a4653\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5086422d5107d7bba0da4b6f9b3e75966b3ee5833d93e8f5baa87e59f99e4578\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:40Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.967139 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:40Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:40 crc kubenswrapper[4875]: I1126 00:07:40.978392 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:40Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.041553 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.041588 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.041597 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.041611 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.041620 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:41Z","lastTransitionTime":"2025-11-26T00:07:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.143726 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.143768 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.143779 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.143796 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.143806 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:41Z","lastTransitionTime":"2025-11-26T00:07:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.246011 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.246041 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.246049 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.246062 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.246070 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:41Z","lastTransitionTime":"2025-11-26T00:07:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.347926 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.347968 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.347978 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.347990 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.348000 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:41Z","lastTransitionTime":"2025-11-26T00:07:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.450110 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.450143 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.450152 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.450166 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.450175 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:41Z","lastTransitionTime":"2025-11-26T00:07:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.528190 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.528279 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:07:41 crc kubenswrapper[4875]: E1126 00:07:41.528342 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.528393 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:07:41 crc kubenswrapper[4875]: E1126 00:07:41.528492 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:07:41 crc kubenswrapper[4875]: E1126 00:07:41.528547 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.554097 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.554156 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.554182 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.554200 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.554212 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:41Z","lastTransitionTime":"2025-11-26T00:07:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.586233 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/network-metrics-daemon-jhkp7"] Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.586752 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:07:41 crc kubenswrapper[4875]: E1126 00:07:41.586816 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.597317 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://546875ee0f2f4855eb47134061708606927a379fcc7e1c3492cffcc519bbb542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdf274332d9e071cabf1993c37531ebc66300c965dd736a4795252168efc371\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"st
artedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9mztb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:41Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.612271 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02732a7d-257e-4d74-8a08-cea35074fc4e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab59be8e3f5b8bfceb2d6bdb642d03f2049be71e3acd91d7b5dd155e471f7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":t
rue,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recurs
iveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host
/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qvrsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:41Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.627186 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qhmvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"40b82738-34d2-4c2f-99c3-ae1198e964dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:40Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:40Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-rbac-proxy 
ovnkube-cluster-manager]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6mk2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6mk2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:40Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qhmvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:41Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.640747 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tvsgw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"be016325-bd90-4293-9d92-bc8cfba8a463\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fecdc3efe4f0230352d90992e44266359ca7129f3654b43186f6e81bbe72ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97pr5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tvsgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:41Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.654310 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c8a175-7cc7-4e45-8e37-d431994e5526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T00:07:26Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1126 00:07:11.241353 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 00:07:11.243535 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-775517230/tls.crt::/tmp/serving-cert-775517230/tls.key\\\\\\\"\\\\nI1126 00:07:26.715442 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 00:07:26.718387 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 00:07:26.718433 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 00:07:26.718455 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 00:07:26.718462 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 00:07:26.727473 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 00:07:26.727505 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727510 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727513 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 00:07:26.727516 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 00:07:26.727520 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 00:07:26.727524 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 00:07:26.727732 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 00:07:26.730962 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:41Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.656318 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.656376 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.656388 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.656457 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.656472 4875 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:41Z","lastTransitionTime":"2025-11-26T00:07:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.667164 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:41Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.685672 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe08e0b-f339-4beb-953e-6e0180c6c945\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://325d860516ab39c35065943a22c5723f6be24442
fa42871dd750aaca6f6e4c64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://325d860516ab39c35065943a22c5723f6be24442fa42871dd750aaca6f6e4c64\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T00:07:39Z\\\",\\\"message\\\":\\\" for Pod openshift-etcd/etcd-crc in node crc\\\\nI1126 00:07:39.637084 6273 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:61897e97-c771-4738-8709-09636387cb00}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1126 00:07:39.637120 6273 obj_retry.go:386] Retry successful for *v1.Pod openshift-etcd/etcd-crc after 0 failed attempt(s)\\\\nI1126 00:07:39.637132 6273 default_network_controller.go:776] Recording success event on pod openshift-etcd/etcd-crc\\\\nF1126 00:07:39.637149 6273 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/no\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:38Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-hh8lx_openshift-ovn-kubernetes(8fe08e0b-f339-4beb-953e-6e0180c6c945)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hh8lx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:41Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.698998 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jhkp7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b640f09-f96d-4197-97be-f50e211e484d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wmjfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wmjfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:41Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jhkp7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:41Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.710024 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6e20b72-5757-4560-ab3e-61ad3a451be8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b34767b95b984666c42ed51106c7d43766e36ce3f5f4e743d8a40451536f82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dc301396f467d1c7d0af549fa7bf7daf1542a2aa87bf3ece820ce8e21a933d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9641d3d437fd7125bbf21d140d2e6853aed997e53f3f6be8133ce94c0d4a4653\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5086422d5107d7bba0da4b6f9b3e75966b3ee5833d93e8f5baa87e59f99e4578\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:41Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.721895 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:41Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.737821 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:41Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.746503 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3b640f09-f96d-4197-97be-f50e211e484d-metrics-certs\") pod \"network-metrics-daemon-jhkp7\" (UID: \"3b640f09-f96d-4197-97be-f50e211e484d\") " pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.746561 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wmjfj\" (UniqueName: \"kubernetes.io/projected/3b640f09-f96d-4197-97be-f50e211e484d-kube-api-access-wmjfj\") pod \"network-metrics-daemon-jhkp7\" (UID: \"3b640f09-f96d-4197-97be-f50e211e484d\") " pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.746597 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hjshz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e5a9914-f410-4b51-8e16-77e18f7b09fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a43439ddb6f096b37261ab045b6a3ed923eefecb9c3122b00a87517605d0aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mm8dm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hjshz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:41Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.757718 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8wvqp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e107494e5b30ef73730bc20a4b4ae86c83caf1590f6a8044d1892e424e1559a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22mhs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8wvqp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:41Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.758878 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.758910 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.758920 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.758934 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.758944 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:41Z","lastTransitionTime":"2025-11-26T00:07:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.774945 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c771ff97-2ac6-43b4-976e-06fb4909c83b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a0c30f49af86d8affd5039728fe2adf22324b93d617dd5c63dfc73aed0ed70e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f839a108e04ab5f68757b2f5661fe0643d88bbd33493cc
800d9c6826d0e69c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef02df42efda33d3f123cfe925bb0d1adab00fecbfd8711962c4bdf458898e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7650f30df5f9f908d3dc068e88e04892dce7a88f63ecc6a52901a36ba7bed5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aac38d8e30fa7e53d316dd4092f7210c7976bd44f77d2a21ab0ed53e1f6c69f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"image\\\":\\\"quay.io/openshift-relea
se-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:41Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.780645 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qhmvp" event={"ID":"40b82738-34d2-4c2f-99c3-ae1198e964dc","Type":"ContainerStarted","Data":"b83a4af8fd03aab54351d9dc3f4b0f9a203aab768023dd1a416361d746473cdb"} Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.780722 
4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qhmvp" event={"ID":"40b82738-34d2-4c2f-99c3-ae1198e964dc","Type":"ContainerStarted","Data":"c52a486d839058df608b79c0f2e01f157347ddefbcefdda59cb44b073c1f776c"} Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.780739 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qhmvp" event={"ID":"40b82738-34d2-4c2f-99c3-ae1198e964dc","Type":"ContainerStarted","Data":"b94c2e20f71dfc57d821562d83acc57ad135e3d330aaa63c2cddbb54099bffb3"} Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.787722 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0dde34d63f530305d33e43053c6475f1e1a5a794722700a85758a4d3d3ef5ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:41Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.797975 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5231f747090cf07f92f8f56d3c43191b581098758208e6901b28de04113ab0c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:41Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.810231 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a19f36bfb94e5e0000f6e930c5160e799d4a3e334d2b119dcc6879fac1a4a98e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9dfa9b8751b0d639d299539f76b3a57c46b730d3960851344162019cb4843f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:41Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.822933 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c8a175-7cc7-4e45-8e37-d431994e5526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T00:07:26Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1126 00:07:11.241353 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 00:07:11.243535 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-775517230/tls.crt::/tmp/serving-cert-775517230/tls.key\\\\\\\"\\\\nI1126 00:07:26.715442 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 00:07:26.718387 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 00:07:26.718433 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 00:07:26.718455 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 00:07:26.718462 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 00:07:26.727473 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 00:07:26.727505 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727510 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727513 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 00:07:26.727516 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 00:07:26.727520 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 00:07:26.727524 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 00:07:26.727732 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 00:07:26.730962 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:41Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.834338 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:41Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.847044 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wmjfj\" (UniqueName: \"kubernetes.io/projected/3b640f09-f96d-4197-97be-f50e211e484d-kube-api-access-wmjfj\") pod \"network-metrics-daemon-jhkp7\" (UID: \"3b640f09-f96d-4197-97be-f50e211e484d\") " pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.847148 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3b640f09-f96d-4197-97be-f50e211e484d-metrics-certs\") pod \"network-metrics-daemon-jhkp7\" (UID: \"3b640f09-f96d-4197-97be-f50e211e484d\") " pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:07:41 crc kubenswrapper[4875]: E1126 00:07:41.847345 4875 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 00:07:41 crc kubenswrapper[4875]: E1126 00:07:41.847492 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3b640f09-f96d-4197-97be-f50e211e484d-metrics-certs podName:3b640f09-f96d-4197-97be-f50e211e484d nodeName:}" failed. No retries permitted until 2025-11-26 00:07:42.347462748 +0000 UTC m=+35.537438463 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3b640f09-f96d-4197-97be-f50e211e484d-metrics-certs") pod "network-metrics-daemon-jhkp7" (UID: "3b640f09-f96d-4197-97be-f50e211e484d") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.849695 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe08e0b-f339-4beb-953e-6e0180c6c945\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnl
y\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"contai
nerID\\\":\\\"cri-o://4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://325d860516ab39c35065943a22c5723f6be24442fa42871dd750aaca6f6e4c64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://325d860516ab39c35065943a22c5723f6be24442fa42871dd750aaca6f6e4c64\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T00:07:39Z\\\",\\\"message\\\":\\\" for Pod openshift-etcd/etcd-crc in node crc\\\\nI1126 00:07:39.637084 6273 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:61897e97-c771-4738-8709-09636387cb00}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1126 00:07:39.637120 6273 obj_retry.go:386] Retry successful for *v1.Pod openshift-etcd/etcd-crc after 0 failed attempt(s)\\\\nI1126 00:07:39.637132 6273 default_network_controller.go:776] Recording success event on pod openshift-etcd/etcd-crc\\\\nF1126 00:07:39.637149 6273 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post 
\\\\\\\"https://127.0.0.1:9743/no\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:38Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller pod=ovnkube-node-hh8lx_openshift-ovn-kubernetes(8fe08e0b-f339-4beb-953e-6e0180c6c945)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"r
eadOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hh8lx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:41Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.858674 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tvsgw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"be016325-bd90-4293-9d92-bc8cfba8a463\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fecdc3efe4f0230352d90992e44266359ca7129f3654b43186f6e81bbe72ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97pr5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tvsgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:41Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.860946 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.860984 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.860993 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.861007 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.861018 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:41Z","lastTransitionTime":"2025-11-26T00:07:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.863855 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wmjfj\" (UniqueName: \"kubernetes.io/projected/3b640f09-f96d-4197-97be-f50e211e484d-kube-api-access-wmjfj\") pod \"network-metrics-daemon-jhkp7\" (UID: \"3b640f09-f96d-4197-97be-f50e211e484d\") " pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.870332 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6e20b72-5757-4560-ab3e-61ad3a451be8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b34767b95b984666c42ed51106c7d43766e36ce3f5f4e743d8a40451536f82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dc301396f467d1c7d0af549fa7bf7daf1542a2aa87bf3ece820ce8e21a933d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9641d3d437fd7125bbf21d140d2e6853aed997e53f3f6be8133ce94c0d4a4653\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287f
aaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5086422d5107d7bba0da4b6f9b3e75966b3ee5833d93e8f5baa87e59f99e4578\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:41Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.881183 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:41Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.891723 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:41Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.899818 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jhkp7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b640f09-f96d-4197-97be-f50e211e484d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wmjfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wmjfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:41Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jhkp7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:41Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.916090 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c771ff97-2ac6-43b4-976e-06fb4909c83b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a0c30f49af86d8affd5039728fe2adf22324b93d617dd5c63dfc73aed0ed70e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f839a108e04ab5f68757b2f5661fe0643d88bbd33493cc800d9c6826d0e69c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef02df42efda33d3f123cfe925bb0d1adab00fecbfd8711962c4bdf458898e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7650f30df5f9f908d3dc068e88e04892dce7a8
8f63ecc6a52901a36ba7bed5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aac38d8e30fa7e53d316dd4092f7210c7976bd44f77d2a21ab0ed53e1f6c69f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:41Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.927933 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0dde34d63f530305d33e43053c6475f1e1a5a794722700a85758a4d3d3ef5ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:41Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.938966 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5231f747090cf07f92f8f56d3c43191b581098758208e6901b28de04113ab0c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:41Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.950187 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a19f36bfb94e5e0000f6e930c5160e799d4a3e334d2b119dcc6879fac1a4a98e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9dfa9b8751b0d639d299539f76b3a57c46b730d3960851344162019cb4843f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:41Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.959803 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hjshz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e5a9914-f410-4b51-8e16-77e18f7b09fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a43439ddb6f096b37261ab045b6a3ed923eefecb9c3122b00a87517605d0aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mm8dm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hjshz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:41Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.963218 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.963254 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.963263 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.963277 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.963286 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:41Z","lastTransitionTime":"2025-11-26T00:07:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.971318 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8wvqp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e107494e5b30ef73730bc20a4b4ae86c83caf1590f6a8044d1892e424e1559a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22mhs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\
\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8wvqp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:41Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.980099 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://546875ee0f2f4855eb47134061708606927a379fcc7e1c3492cffcc519bbb542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdf274332d9e071cabf1993c37531ebc66300c965dd736a4795252168efc371\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP
\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9mztb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:41Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:41 crc kubenswrapper[4875]: I1126 00:07:41.991585 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02732a7d-257e-4d74-8a08-cea35074fc4e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab59be8e3f5b8bfceb2d6bdb642d03f2049be71e3acd91d7b5dd155e471f7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:
28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release
-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[
{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qvrsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:41Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.000565 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qhmvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"40b82738-34d2-4c2f-99c3-ae1198e964dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c52a486d839058df608b79c0f2e01f157347ddefbcefdda59cb44b073c1f776c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6mk2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b83a4af8fd03aab54351d9dc3f4b0f9a203aab768023dd1a416361d746473cdb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6mk2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOn
ly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:40Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qhmvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:41Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.065141 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.065170 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.065180 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.065193 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.065203 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:42Z","lastTransitionTime":"2025-11-26T00:07:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.167148 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.167182 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.167199 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.167217 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.167228 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:42Z","lastTransitionTime":"2025-11-26T00:07:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.269478 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.269513 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.269523 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.269542 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.269554 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:42Z","lastTransitionTime":"2025-11-26T00:07:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.352571 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3b640f09-f96d-4197-97be-f50e211e484d-metrics-certs\") pod \"network-metrics-daemon-jhkp7\" (UID: \"3b640f09-f96d-4197-97be-f50e211e484d\") " pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:07:42 crc kubenswrapper[4875]: E1126 00:07:42.352719 4875 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 00:07:42 crc kubenswrapper[4875]: E1126 00:07:42.352805 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3b640f09-f96d-4197-97be-f50e211e484d-metrics-certs podName:3b640f09-f96d-4197-97be-f50e211e484d nodeName:}" failed. No retries permitted until 2025-11-26 00:07:43.352788177 +0000 UTC m=+36.542763872 (durationBeforeRetry 1s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3b640f09-f96d-4197-97be-f50e211e484d-metrics-certs") pod "network-metrics-daemon-jhkp7" (UID: "3b640f09-f96d-4197-97be-f50e211e484d") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.371804 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.371839 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.371849 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.371870 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.371881 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:42Z","lastTransitionTime":"2025-11-26T00:07:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.474065 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.474128 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.474137 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.474151 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.474159 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:42Z","lastTransitionTime":"2025-11-26T00:07:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.577174 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.577461 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.577528 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.577639 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.577722 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:42Z","lastTransitionTime":"2025-11-26T00:07:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.679863 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.679902 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.679912 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.680047 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.680074 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:42Z","lastTransitionTime":"2025-11-26T00:07:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.782248 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.782318 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.782341 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.782390 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.782447 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:42Z","lastTransitionTime":"2025-11-26T00:07:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.884037 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.884072 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.884083 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.884097 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.884106 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:42Z","lastTransitionTime":"2025-11-26T00:07:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.986589 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.986625 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.986633 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.986646 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:42 crc kubenswrapper[4875]: I1126 00:07:42.986654 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:42Z","lastTransitionTime":"2025-11-26T00:07:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:43 crc kubenswrapper[4875]: I1126 00:07:43.088540 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:43 crc kubenswrapper[4875]: I1126 00:07:43.088577 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:43 crc kubenswrapper[4875]: I1126 00:07:43.088587 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:43 crc kubenswrapper[4875]: I1126 00:07:43.088601 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:43 crc kubenswrapper[4875]: I1126 00:07:43.088610 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:43Z","lastTransitionTime":"2025-11-26T00:07:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:43 crc kubenswrapper[4875]: I1126 00:07:43.159544 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:07:43 crc kubenswrapper[4875]: E1126 00:07:43.159742 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:07:59.159728066 +0000 UTC m=+52.349703761 (durationBeforeRetry 16s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:07:43 crc kubenswrapper[4875]: I1126 00:07:43.191132 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:43 crc kubenswrapper[4875]: I1126 00:07:43.191161 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:43 crc kubenswrapper[4875]: I1126 00:07:43.191169 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:43 crc kubenswrapper[4875]: I1126 00:07:43.191183 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:43 crc kubenswrapper[4875]: I1126 00:07:43.191192 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:43Z","lastTransitionTime":"2025-11-26T00:07:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:43 crc kubenswrapper[4875]: I1126 00:07:43.260346 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:07:43 crc kubenswrapper[4875]: I1126 00:07:43.260388 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:07:43 crc kubenswrapper[4875]: I1126 00:07:43.260446 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:07:43 crc kubenswrapper[4875]: I1126 00:07:43.260470 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:07:43 crc kubenswrapper[4875]: E1126 00:07:43.260585 4875 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 00:07:43 crc kubenswrapper[4875]: E1126 00:07:43.260600 4875 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 00:07:43 crc kubenswrapper[4875]: E1126 00:07:43.260609 4875 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 00:07:43 crc kubenswrapper[4875]: E1126 00:07:43.260647 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 00:07:59.260635182 +0000 UTC m=+52.450610877 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 00:07:43 crc kubenswrapper[4875]: E1126 00:07:43.261006 4875 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 00:07:43 crc kubenswrapper[4875]: E1126 00:07:43.261009 4875 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 00:07:43 crc kubenswrapper[4875]: E1126 00:07:43.261076 4875 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 00:07:43 crc kubenswrapper[4875]: E1126 00:07:43.261185 4875 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 00:07:43 crc kubenswrapper[4875]: E1126 00:07:43.261194 4875 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 00:07:43 crc kubenswrapper[4875]: E1126 00:07:43.261338 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 00:07:59.261128244 +0000 UTC m=+52.451103939 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 00:07:43 crc kubenswrapper[4875]: E1126 00:07:43.261456 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 00:07:59.261443022 +0000 UTC m=+52.451418817 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 00:07:43 crc kubenswrapper[4875]: E1126 00:07:43.261567 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. 
No retries permitted until 2025-11-26 00:07:59.261556235 +0000 UTC m=+52.451532020 (durationBeforeRetry 16s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 00:07:43 crc kubenswrapper[4875]: I1126 00:07:43.293464 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:43 crc kubenswrapper[4875]: I1126 00:07:43.293739 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:43 crc kubenswrapper[4875]: I1126 00:07:43.293861 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:43 crc kubenswrapper[4875]: I1126 00:07:43.293985 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:43 crc kubenswrapper[4875]: I1126 00:07:43.294092 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:43Z","lastTransitionTime":"2025-11-26T00:07:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:43 crc kubenswrapper[4875]: I1126 00:07:43.361469 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3b640f09-f96d-4197-97be-f50e211e484d-metrics-certs\") pod \"network-metrics-daemon-jhkp7\" (UID: \"3b640f09-f96d-4197-97be-f50e211e484d\") " pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:07:43 crc kubenswrapper[4875]: E1126 00:07:43.361653 4875 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 00:07:43 crc kubenswrapper[4875]: E1126 00:07:43.361950 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3b640f09-f96d-4197-97be-f50e211e484d-metrics-certs podName:3b640f09-f96d-4197-97be-f50e211e484d nodeName:}" failed. No retries permitted until 2025-11-26 00:07:45.361928997 +0000 UTC m=+38.551904712 (durationBeforeRetry 2s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3b640f09-f96d-4197-97be-f50e211e484d-metrics-certs") pod "network-metrics-daemon-jhkp7" (UID: "3b640f09-f96d-4197-97be-f50e211e484d") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 00:07:43 crc kubenswrapper[4875]: I1126 00:07:43.396596 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:43 crc kubenswrapper[4875]: I1126 00:07:43.396633 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:43 crc kubenswrapper[4875]: I1126 00:07:43.396644 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:43 crc kubenswrapper[4875]: I1126 00:07:43.396660 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:43 crc kubenswrapper[4875]: I1126 00:07:43.396673 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:43Z","lastTransitionTime":"2025-11-26T00:07:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:43 crc kubenswrapper[4875]: I1126 00:07:43.484217 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:43 crc kubenswrapper[4875]: I1126 00:07:43.484250 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:43 crc kubenswrapper[4875]: I1126 00:07:43.484258 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:43 crc kubenswrapper[4875]: I1126 00:07:43.484270 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:43 crc kubenswrapper[4875]: I1126 00:07:43.484280 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:43Z","lastTransitionTime":"2025-11-26T00:07:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:43 crc kubenswrapper[4875]: I1126 00:07:43.528127 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:07:43 crc kubenswrapper[4875]: E1126 00:07:43.528352 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:07:43 crc kubenswrapper[4875]: I1126 00:07:43.528512 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:07:43 crc kubenswrapper[4875]: E1126 00:07:43.528629 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:07:43 crc kubenswrapper[4875]: I1126 00:07:43.528727 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:07:43 crc kubenswrapper[4875]: I1126 00:07:43.528855 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:07:43 crc kubenswrapper[4875]: E1126 00:07:43.528883 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:07:43 crc kubenswrapper[4875]: E1126 00:07:43.529174 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:07:44 crc kubenswrapper[4875]: E1126 00:07:44.183622 4875 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:43Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:43Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:43Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:43Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e98cdd5c-4f13-4403-b492-aac02592afb8\\\",\\\"systemUUID\\\":\\\"9f6d4e71-289a-40d7-bad1-b955dbea75de\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:43Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.187770 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.188049 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.188136 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.188233 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.188325 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:44Z","lastTransitionTime":"2025-11-26T00:07:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:44 crc kubenswrapper[4875]: E1126 00:07:44.202349 4875 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:44Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:44Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e98cdd5c-4f13-4403-b492-aac02592afb8\\\",\\\"systemUUID\\\":\\\"9f6d4e71-289a-40d7-bad1-b955dbea75de\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:44Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.205525 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.205561 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.205581 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.205599 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.205615 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:44Z","lastTransitionTime":"2025-11-26T00:07:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:44 crc kubenswrapper[4875]: E1126 00:07:44.216219 4875 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:44Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:44Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e98cdd5c-4f13-4403-b492-aac02592afb8\\\",\\\"systemUUID\\\":\\\"9f6d4e71-289a-40d7-bad1-b955dbea75de\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:44Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.219080 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.219111 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.219119 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.219134 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.219143 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:44Z","lastTransitionTime":"2025-11-26T00:07:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:44 crc kubenswrapper[4875]: E1126 00:07:44.228821 4875 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:44Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:44Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e98cdd5c-4f13-4403-b492-aac02592afb8\\\",\\\"systemUUID\\\":\\\"9f6d4e71-289a-40d7-bad1-b955dbea75de\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:44Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.232015 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.232044 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.232054 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.232069 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.232080 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:44Z","lastTransitionTime":"2025-11-26T00:07:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:44 crc kubenswrapper[4875]: E1126 00:07:44.243492 4875 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:44Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:44Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:44Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:44Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e98cdd5c-4f13-4403-b492-aac02592afb8\\\",\\\"systemUUID\\\":\\\"9f6d4e71-289a-40d7-bad1-b955dbea75de\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:44Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:44 crc kubenswrapper[4875]: E1126 00:07:44.243618 4875 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.245017 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.245052 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.245063 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.245078 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.245092 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:44Z","lastTransitionTime":"2025-11-26T00:07:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.347703 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.347738 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.347747 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.347759 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.347769 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:44Z","lastTransitionTime":"2025-11-26T00:07:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.450639 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.450698 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.450708 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.450721 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.450731 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:44Z","lastTransitionTime":"2025-11-26T00:07:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.528672 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:07:44 crc kubenswrapper[4875]: E1126 00:07:44.528822 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.552946 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.552987 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.552996 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.553015 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.553027 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:44Z","lastTransitionTime":"2025-11-26T00:07:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.654502 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.654546 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.654557 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.654572 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.654581 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:44Z","lastTransitionTime":"2025-11-26T00:07:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.756879 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.756924 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.756935 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.756953 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.756965 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:44Z","lastTransitionTime":"2025-11-26T00:07:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.858472 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.858504 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.858512 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.858524 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.858532 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:44Z","lastTransitionTime":"2025-11-26T00:07:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.960666 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.960718 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.960730 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.960765 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:44 crc kubenswrapper[4875]: I1126 00:07:44.960778 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:44Z","lastTransitionTime":"2025-11-26T00:07:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.062775 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.062831 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.062843 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.062886 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.062912 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:45Z","lastTransitionTime":"2025-11-26T00:07:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.166018 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.166054 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.166063 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.166085 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.166096 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:45Z","lastTransitionTime":"2025-11-26T00:07:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.268705 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.268748 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.268760 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.268776 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.268789 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:45Z","lastTransitionTime":"2025-11-26T00:07:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.371249 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.371301 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.371312 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.371329 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.371340 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:45Z","lastTransitionTime":"2025-11-26T00:07:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.380752 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3b640f09-f96d-4197-97be-f50e211e484d-metrics-certs\") pod \"network-metrics-daemon-jhkp7\" (UID: \"3b640f09-f96d-4197-97be-f50e211e484d\") " pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:07:45 crc kubenswrapper[4875]: E1126 00:07:45.380890 4875 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 00:07:45 crc kubenswrapper[4875]: E1126 00:07:45.380986 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3b640f09-f96d-4197-97be-f50e211e484d-metrics-certs podName:3b640f09-f96d-4197-97be-f50e211e484d nodeName:}" failed. No retries permitted until 2025-11-26 00:07:49.380961428 +0000 UTC m=+42.570937133 (durationBeforeRetry 4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3b640f09-f96d-4197-97be-f50e211e484d-metrics-certs") pod "network-metrics-daemon-jhkp7" (UID: "3b640f09-f96d-4197-97be-f50e211e484d") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.473865 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.473907 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.473918 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.473931 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.473940 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:45Z","lastTransitionTime":"2025-11-26T00:07:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.528859 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.528868 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:07:45 crc kubenswrapper[4875]: E1126 00:07:45.529031 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.529157 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:07:45 crc kubenswrapper[4875]: E1126 00:07:45.529232 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:07:45 crc kubenswrapper[4875]: E1126 00:07:45.529347 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.576301 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.576352 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.576367 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.576392 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.576435 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:45Z","lastTransitionTime":"2025-11-26T00:07:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.678439 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.678480 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.678494 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.678509 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.678517 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:45Z","lastTransitionTime":"2025-11-26T00:07:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.780671 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.780709 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.780720 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.780737 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.780749 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:45Z","lastTransitionTime":"2025-11-26T00:07:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.883034 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.883085 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.883101 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.883123 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.883139 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:45Z","lastTransitionTime":"2025-11-26T00:07:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.986016 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.986061 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.986072 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.986091 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:45 crc kubenswrapper[4875]: I1126 00:07:45.986102 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:45Z","lastTransitionTime":"2025-11-26T00:07:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.089111 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.089153 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.089161 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.089175 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.089187 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:46Z","lastTransitionTime":"2025-11-26T00:07:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.191128 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.191176 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.191189 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.191207 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.191220 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:46Z","lastTransitionTime":"2025-11-26T00:07:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.293201 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.293246 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.293262 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.293284 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.293298 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:46Z","lastTransitionTime":"2025-11-26T00:07:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.396290 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.396339 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.396351 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.396368 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.396381 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:46Z","lastTransitionTime":"2025-11-26T00:07:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.499298 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.499340 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.499352 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.499368 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.499379 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:46Z","lastTransitionTime":"2025-11-26T00:07:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.527911 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:07:46 crc kubenswrapper[4875]: E1126 00:07:46.528037 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.602028 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.602077 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.602089 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.602106 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.602116 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:46Z","lastTransitionTime":"2025-11-26T00:07:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.704363 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.704395 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.704403 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.704439 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.704448 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:46Z","lastTransitionTime":"2025-11-26T00:07:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.806790 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.806823 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.806833 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.806847 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.806856 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:46Z","lastTransitionTime":"2025-11-26T00:07:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.909947 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.910035 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.910072 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.910103 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:46 crc kubenswrapper[4875]: I1126 00:07:46.910127 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:46Z","lastTransitionTime":"2025-11-26T00:07:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.013121 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.013164 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.013173 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.013188 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.013197 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:47Z","lastTransitionTime":"2025-11-26T00:07:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.115466 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.115564 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.115598 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.115618 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.115629 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:47Z","lastTransitionTime":"2025-11-26T00:07:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.218062 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.218092 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.218103 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.218119 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.218129 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:47Z","lastTransitionTime":"2025-11-26T00:07:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.321280 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.321346 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.321355 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.321369 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.321378 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:47Z","lastTransitionTime":"2025-11-26T00:07:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.424006 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.424039 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.424050 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.424065 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.424075 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:47Z","lastTransitionTime":"2025-11-26T00:07:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.527830 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.527919 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.527946 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.527978 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.528004 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:47Z","lastTransitionTime":"2025-11-26T00:07:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.528063 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.528084 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.528067 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:07:47 crc kubenswrapper[4875]: E1126 00:07:47.528206 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:07:47 crc kubenswrapper[4875]: E1126 00:07:47.528294 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:07:47 crc kubenswrapper[4875]: E1126 00:07:47.528469 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.564577 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c771ff97-2ac6-43b4-976e-06fb4909c83b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a0c30f49af86d8affd5039728fe2adf22324b93d617dd5c63dfc73aed0ed70e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f839a108e04ab5f68757b2f5661fe0643d88bbd33493cc800d9c6826d0e69c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":tru
e,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef02df42efda33d3f123cfe925bb0d1adab00fecbfd8711962c4bdf458898e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7650f30df5f9f908d3dc068e88e04892dce7a88f63ecc6a52901a36ba7bed5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aac38d8e30fa7e53d316dd4092f7210c7976bd44f77d2a21ab0ed53e1f6c69f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"term
inated\\\":{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:47Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.585796 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0dde34d63f530305d33e43053c6475f1e1a5a794722700a85758a4d3d3ef5ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:47Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.603815 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5231f747090cf07f92f8f56d3c43191b581098758208e6901b28de04113ab0c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:47Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.623256 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a19f36bfb94e5e0000f6e930c5160e799d4a3e334d2b119dcc6879fac1a4a98e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9dfa9b8751b0d639d299539f76b3a57c46b730d3960851344162019cb4843f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:47Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.632327 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.632401 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.632458 4875 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.632499 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.632528 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:47Z","lastTransitionTime":"2025-11-26T00:07:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.636292 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hjshz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e5a9914-f410-4b51-8e16-77e18f7b09fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a43439ddb6f096b37261ab045b6a3ed923eefecb9c3122b00a87517605d0aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mm8dm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hjshz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:47Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.658832 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8wvqp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e107494e5b30ef73730bc20a4b4ae86c83caf1590f6a8044d1892e424e1559a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22mhs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8wvqp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:47Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.674868 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://546875ee0f2f4855eb47134061708606927a379fcc7e1c3492cffcc519bbb542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdf274332d9e071cabf1993c37531ebc66300c965dd736a4795252168efc371\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-9mztb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:47Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.690032 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02732a7d-257e-4d74-8a08-cea35074fc4e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab59be8e3f5b8bfceb2d6bdb642d03f2049be71e3acd91d7b5dd155e471f7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\
",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"
containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qvrsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:47Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.709274 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qhmvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"40b82738-34d2-4c2f-99c3-ae1198e964dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c52a486d839058df608b79c0f2e01f157347ddefbcefdda59cb44b073c1f776c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6mk2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b83a4af8fd03aab54351d9dc3f4b0f9a203aab768023dd1a416361d746473cdb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6mk2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\
\"2025-11-26T00:07:40Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qhmvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:47Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.723546 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c8a175-7cc7-4e45-8e37-d431994e5526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c9871
17ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T00:07:26Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1126 00:07:11.241353 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 00:07:11.243535 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-775517230/tls.crt::/tmp/serving-cert-775517230/tls.key\\\\\\\"\\\\nI1126 00:07:26.715442 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 00:07:26.718387 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 00:07:26.718433 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 00:07:26.718455 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 00:07:26.718462 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 00:07:26.727473 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 00:07:26.727505 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727510 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727513 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 00:07:26.727516 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 00:07:26.727520 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 00:07:26.727524 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 00:07:26.727732 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 00:07:26.730962 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:47Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.734814 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.734857 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.734869 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.734884 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.734895 4875 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:47Z","lastTransitionTime":"2025-11-26T00:07:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.737614 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:47Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.758356 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe08e0b-f339-4beb-953e-6e0180c6c945\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://325d860516ab39c35065943a22c5723f6be24442
fa42871dd750aaca6f6e4c64\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://325d860516ab39c35065943a22c5723f6be24442fa42871dd750aaca6f6e4c64\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T00:07:39Z\\\",\\\"message\\\":\\\" for Pod openshift-etcd/etcd-crc in node crc\\\\nI1126 00:07:39.637084 6273 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:61897e97-c771-4738-8709-09636387cb00}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1126 00:07:39.637120 6273 obj_retry.go:386] Retry successful for *v1.Pod openshift-etcd/etcd-crc after 0 failed attempt(s)\\\\nI1126 00:07:39.637132 6273 default_network_controller.go:776] Recording success event on pod openshift-etcd/etcd-crc\\\\nF1126 00:07:39.637149 6273 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/no\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:38Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":1,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 10s restarting failed container=ovnkube-controller 
pod=ovnkube-node-hh8lx_openshift-ovn-kubernetes(8fe08e0b-f339-4beb-953e-6e0180c6c945)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hh8lx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:47Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.774331 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tvsgw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be016325-bd90-4293-9d92-bc8cfba8a463\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fecdc3efe4f0230352d90992e44266359ca7129f3654b43186f6e81bbe72ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97pr5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tvsgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:47Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.790830 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6e20b72-5757-4560-ab3e-61ad3a451be8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b34767b95b984666c42ed51106c7d43766e36ce3f5f4e743d8a40451536f82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dc301396f467d1c7d0af549fa7bf7daf1542a2aa87bf3ece820ce8e21a933d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9641d3d437fd7125bbf21d140d2e6853aed997e53f3f6be8133ce94c0d4a4653\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-
manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5086422d5107d7bba0da4b6f9b3e75966b3ee5833d93e8f5baa87e59f99e4578\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:47Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.812174 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:47Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.827296 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:47Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.838680 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.838729 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.838741 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.838760 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.838775 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:47Z","lastTransitionTime":"2025-11-26T00:07:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.846763 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jhkp7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b640f09-f96d-4197-97be-f50e211e484d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wmjfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wmjfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:41Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jhkp7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:47Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.941966 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.942081 4875 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.942117 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.942152 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:47 crc kubenswrapper[4875]: I1126 00:07:47.942172 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:47Z","lastTransitionTime":"2025-11-26T00:07:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.045390 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.045526 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.045545 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.045573 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.045595 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:48Z","lastTransitionTime":"2025-11-26T00:07:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.148813 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.148898 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.148926 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.149187 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.149321 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:48Z","lastTransitionTime":"2025-11-26T00:07:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.252499 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.252567 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.252586 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.252619 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.252644 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:48Z","lastTransitionTime":"2025-11-26T00:07:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.356010 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.356069 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.356081 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.356100 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.356114 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:48Z","lastTransitionTime":"2025-11-26T00:07:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.461148 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.462160 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.462202 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.462235 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.462258 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:48Z","lastTransitionTime":"2025-11-26T00:07:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.528869 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:07:48 crc kubenswrapper[4875]: E1126 00:07:48.529032 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.564718 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.564777 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.564797 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.564817 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.564831 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:48Z","lastTransitionTime":"2025-11-26T00:07:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.666916 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.666950 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.666967 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.666984 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.666995 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:48Z","lastTransitionTime":"2025-11-26T00:07:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.769208 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.769246 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.769257 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.769273 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.769286 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:48Z","lastTransitionTime":"2025-11-26T00:07:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.871639 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.871691 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.871705 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.871722 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.871732 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:48Z","lastTransitionTime":"2025-11-26T00:07:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.974802 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.974923 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.974944 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.974967 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:48 crc kubenswrapper[4875]: I1126 00:07:48.974981 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:48Z","lastTransitionTime":"2025-11-26T00:07:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.077025 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.077067 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.077076 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.077090 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.077102 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:49Z","lastTransitionTime":"2025-11-26T00:07:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.181530 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.181582 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.181596 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.181617 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.181631 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:49Z","lastTransitionTime":"2025-11-26T00:07:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.284020 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.284054 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.284064 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.284094 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.284104 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:49Z","lastTransitionTime":"2025-11-26T00:07:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.386540 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.386576 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.386585 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.386596 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.386604 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:49Z","lastTransitionTime":"2025-11-26T00:07:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.425190 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3b640f09-f96d-4197-97be-f50e211e484d-metrics-certs\") pod \"network-metrics-daemon-jhkp7\" (UID: \"3b640f09-f96d-4197-97be-f50e211e484d\") " pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:07:49 crc kubenswrapper[4875]: E1126 00:07:49.425336 4875 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 00:07:49 crc kubenswrapper[4875]: E1126 00:07:49.425438 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3b640f09-f96d-4197-97be-f50e211e484d-metrics-certs podName:3b640f09-f96d-4197-97be-f50e211e484d nodeName:}" failed. No retries permitted until 2025-11-26 00:07:57.425397163 +0000 UTC m=+50.615372858 (durationBeforeRetry 8s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3b640f09-f96d-4197-97be-f50e211e484d-metrics-certs") pod "network-metrics-daemon-jhkp7" (UID: "3b640f09-f96d-4197-97be-f50e211e484d") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.488934 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.488981 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.488992 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.489010 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.489020 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:49Z","lastTransitionTime":"2025-11-26T00:07:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.528331 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.528398 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.528331 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:07:49 crc kubenswrapper[4875]: E1126 00:07:49.528534 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:07:49 crc kubenswrapper[4875]: E1126 00:07:49.528622 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:07:49 crc kubenswrapper[4875]: E1126 00:07:49.528709 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.591454 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.591483 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.591491 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.591503 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.591512 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:49Z","lastTransitionTime":"2025-11-26T00:07:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.694283 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.694354 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.694378 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.694397 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.694431 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:49Z","lastTransitionTime":"2025-11-26T00:07:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.797153 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.797194 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.797205 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.797222 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.797234 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:49Z","lastTransitionTime":"2025-11-26T00:07:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.899756 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.899805 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.899819 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.899837 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:49 crc kubenswrapper[4875]: I1126 00:07:49.899850 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:49Z","lastTransitionTime":"2025-11-26T00:07:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.001726 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.001786 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.001801 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.001821 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.001834 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:50Z","lastTransitionTime":"2025-11-26T00:07:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.104517 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.104705 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.104733 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.104760 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.104784 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:50Z","lastTransitionTime":"2025-11-26T00:07:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.207626 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.207702 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.207724 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.207757 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.207779 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:50Z","lastTransitionTime":"2025-11-26T00:07:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.311127 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.311173 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.311182 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.311196 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.311206 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:50Z","lastTransitionTime":"2025-11-26T00:07:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.419891 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.419986 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.420007 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.420042 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.420065 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:50Z","lastTransitionTime":"2025-11-26T00:07:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.522720 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.522839 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.522867 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.522899 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.522922 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:50Z","lastTransitionTime":"2025-11-26T00:07:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.528882 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:07:50 crc kubenswrapper[4875]: E1126 00:07:50.529012 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.626263 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.626308 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.626318 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.626331 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.626341 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:50Z","lastTransitionTime":"2025-11-26T00:07:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.729648 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.729731 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.729758 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.729793 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.729817 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:50Z","lastTransitionTime":"2025-11-26T00:07:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.833151 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.833227 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.833250 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.833280 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.833300 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:50Z","lastTransitionTime":"2025-11-26T00:07:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.937059 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.937132 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.937155 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.937187 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:50 crc kubenswrapper[4875]: I1126 00:07:50.937210 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:50Z","lastTransitionTime":"2025-11-26T00:07:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.039578 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.039620 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.039632 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.039649 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.039661 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:51Z","lastTransitionTime":"2025-11-26T00:07:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.166854 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.166922 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.166940 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.166972 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.166992 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:51Z","lastTransitionTime":"2025-11-26T00:07:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.270126 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.270174 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.270185 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.270201 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.270220 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:51Z","lastTransitionTime":"2025-11-26T00:07:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.373175 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.373271 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.373288 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.373302 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.373311 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:51Z","lastTransitionTime":"2025-11-26T00:07:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.475334 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.475376 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.475385 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.475400 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.475428 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:51Z","lastTransitionTime":"2025-11-26T00:07:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.528907 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.528914 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:07:51 crc kubenswrapper[4875]: E1126 00:07:51.529020 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.529063 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:07:51 crc kubenswrapper[4875]: E1126 00:07:51.529131 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:07:51 crc kubenswrapper[4875]: E1126 00:07:51.529186 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.578828 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.578883 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.578898 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.578921 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.578937 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:51Z","lastTransitionTime":"2025-11-26T00:07:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.682060 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.682123 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.682137 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.682162 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.682175 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:51Z","lastTransitionTime":"2025-11-26T00:07:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.785943 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.786004 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.786014 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.786031 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.786041 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:51Z","lastTransitionTime":"2025-11-26T00:07:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.889385 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.889436 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.889447 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.889462 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.889472 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:51Z","lastTransitionTime":"2025-11-26T00:07:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.993178 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.993233 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.993249 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.993273 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:51 crc kubenswrapper[4875]: I1126 00:07:51.993289 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:51Z","lastTransitionTime":"2025-11-26T00:07:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.096703 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.096749 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.096762 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.096780 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.096793 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:52Z","lastTransitionTime":"2025-11-26T00:07:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.200640 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.200705 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.200733 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.200771 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.200802 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:52Z","lastTransitionTime":"2025-11-26T00:07:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.304618 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.304754 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.304814 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.304886 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.304908 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:52Z","lastTransitionTime":"2025-11-26T00:07:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.408334 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.408387 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.408397 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.408432 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.408444 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:52Z","lastTransitionTime":"2025-11-26T00:07:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.511610 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.511677 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.511688 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.511705 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.511714 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:52Z","lastTransitionTime":"2025-11-26T00:07:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.528664 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:07:52 crc kubenswrapper[4875]: E1126 00:07:52.528942 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.615647 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.615738 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.615762 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.615797 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.615822 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:52Z","lastTransitionTime":"2025-11-26T00:07:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.718942 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.719022 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.719054 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.719084 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.719113 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:52Z","lastTransitionTime":"2025-11-26T00:07:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.822049 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.822108 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.822120 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.822138 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.822149 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:52Z","lastTransitionTime":"2025-11-26T00:07:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.925449 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.925497 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.925507 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.925528 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:52 crc kubenswrapper[4875]: I1126 00:07:52.925540 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:52Z","lastTransitionTime":"2025-11-26T00:07:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.029822 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.029881 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.029893 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.029916 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.029930 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:53Z","lastTransitionTime":"2025-11-26T00:07:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.133751 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.133865 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.133894 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.133936 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.133966 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:53Z","lastTransitionTime":"2025-11-26T00:07:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.236985 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.237038 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.237046 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.237060 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.237071 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:53Z","lastTransitionTime":"2025-11-26T00:07:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.340091 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.340163 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.340195 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.340211 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.340223 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:53Z","lastTransitionTime":"2025-11-26T00:07:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.442446 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.442520 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.442537 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.442559 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.442574 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:53Z","lastTransitionTime":"2025-11-26T00:07:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.528697 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.528753 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:07:53 crc kubenswrapper[4875]: E1126 00:07:53.528855 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.528697 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:07:53 crc kubenswrapper[4875]: E1126 00:07:53.529031 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:07:53 crc kubenswrapper[4875]: E1126 00:07:53.529083 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.545482 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.545542 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.545559 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.545584 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.545603 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:53Z","lastTransitionTime":"2025-11-26T00:07:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.648244 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.648280 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.648288 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.648303 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.648317 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:53Z","lastTransitionTime":"2025-11-26T00:07:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.751181 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.751249 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.751263 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.751281 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.751293 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:53Z","lastTransitionTime":"2025-11-26T00:07:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.854187 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.854251 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.854264 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.854281 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.854310 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:53Z","lastTransitionTime":"2025-11-26T00:07:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.957236 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.957284 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.957294 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.957314 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:53 crc kubenswrapper[4875]: I1126 00:07:53.957326 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:53Z","lastTransitionTime":"2025-11-26T00:07:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.060110 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.060159 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.060173 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.060193 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.060205 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:54Z","lastTransitionTime":"2025-11-26T00:07:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.163677 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.163754 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.163777 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.163807 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.163829 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:54Z","lastTransitionTime":"2025-11-26T00:07:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.266267 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.266306 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.266316 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.266329 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.266337 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:54Z","lastTransitionTime":"2025-11-26T00:07:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.318284 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.318380 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.318405 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.318494 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.318517 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:54Z","lastTransitionTime":"2025-11-26T00:07:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:54 crc kubenswrapper[4875]: E1126 00:07:54.334710 4875 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e98cdd5c-4f13-4403-b492-aac02592afb8\\\",\\\"systemUUID\\\":\\\"9f6d4e71-289a-40d7-bad1-b955dbea75de\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:54Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.339678 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.339846 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.340010 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.340155 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.340267 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:54Z","lastTransitionTime":"2025-11-26T00:07:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:54 crc kubenswrapper[4875]: E1126 00:07:54.352187 4875 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e98cdd5c-4f13-4403-b492-aac02592afb8\\\",\\\"systemUUID\\\":\\\"9f6d4e71-289a-40d7-bad1-b955dbea75de\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:54Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.355653 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.355812 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.355896 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.356001 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.356090 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:54Z","lastTransitionTime":"2025-11-26T00:07:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:54 crc kubenswrapper[4875]: E1126 00:07:54.369139 4875 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e98cdd5c-4f13-4403-b492-aac02592afb8\\\",\\\"systemUUID\\\":\\\"9f6d4e71-289a-40d7-bad1-b955dbea75de\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:54Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.374251 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.374315 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.374329 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.374367 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.374382 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:54Z","lastTransitionTime":"2025-11-26T00:07:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:54 crc kubenswrapper[4875]: E1126 00:07:54.393887 4875 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e98cdd5c-4f13-4403-b492-aac02592afb8\\\",\\\"systemUUID\\\":\\\"9f6d4e71-289a-40d7-bad1-b955dbea75de\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:54Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.400007 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.400068 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.400082 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.400102 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.400117 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:54Z","lastTransitionTime":"2025-11-26T00:07:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:54 crc kubenswrapper[4875]: E1126 00:07:54.417747 4875 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:54Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:54Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:07:54Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:54Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e98cdd5c-4f13-4403-b492-aac02592afb8\\\",\\\"systemUUID\\\":\\\"9f6d4e71-289a-40d7-bad1-b955dbea75de\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:54Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:54 crc kubenswrapper[4875]: E1126 00:07:54.417860 4875 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.419491 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.419534 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.419545 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.419561 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.419573 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:54Z","lastTransitionTime":"2025-11-26T00:07:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.522333 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.522380 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.522452 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.522496 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.522509 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:54Z","lastTransitionTime":"2025-11-26T00:07:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.528525 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:07:54 crc kubenswrapper[4875]: E1126 00:07:54.528629 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.529792 4875 scope.go:117] "RemoveContainer" containerID="325d860516ab39c35065943a22c5723f6be24442fa42871dd750aaca6f6e4c64" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.625824 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.626233 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.626246 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.626264 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.626277 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:54Z","lastTransitionTime":"2025-11-26T00:07:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.729243 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.729265 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.729273 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.729286 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.729296 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:54Z","lastTransitionTime":"2025-11-26T00:07:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.825268 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hh8lx_8fe08e0b-f339-4beb-953e-6e0180c6c945/ovnkube-controller/1.log" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.827772 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" event={"ID":"8fe08e0b-f339-4beb-953e-6e0180c6c945","Type":"ContainerStarted","Data":"93c48708c135563d79026fd18dbd330523d6cce108cf7f871fb6553ea12e7025"} Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.828835 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.831781 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.831806 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.831815 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.831828 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.831839 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:54Z","lastTransitionTime":"2025-11-26T00:07:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.838943 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://546875ee0f2f4855eb47134061708606927a379fcc7e1c3492cffcc519bbb542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdf274332d9e071cabf1993c37531ebc66300c965dd736a4795252168efc371\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9mztb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:54Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.852796 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02732a7d-257e-4d74-8a08-cea35074fc4e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab59be8e3f5b8bfceb2d6bdb642d03f2049be71e3acd91d7b5dd155e471f7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"read
Only\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:32Z\\\",\\\"reason\\\":\\\"Completed
\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qvrsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:54Z is after 
2025-08-24T17:21:41Z" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.866962 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qhmvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"40b82738-34d2-4c2f-99c3-ae1198e964dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c52a486d839058df608b79c0f2e01f157347ddefbcefdda59cb44b073c1f776c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6mk2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b83a4af8fd03aab54351d9dc3f4b0f9a203aab768023dd1a416361d746473cdb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6mk2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:40Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qhmvp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:54Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.882745 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c8a175-7cc7-4e45-8e37-d431994e5526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\
\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T00:07:26Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1126 00:07:11.241353 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 00:07:11.243535 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-775517230/tls.crt::/tmp/serving-cert-775517230/tls.key\\\\\\\"\\\\nI1126 00:07:26.715442 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 00:07:26.718387 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 00:07:26.718433 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 00:07:26.718455 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 00:07:26.718462 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 00:07:26.727473 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 00:07:26.727505 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727510 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727513 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 00:07:26.727516 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 00:07:26.727520 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 00:07:26.727524 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 00:07:26.727732 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 00:07:26.730962 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:54Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.899571 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:54Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.918717 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe08e0b-f339-4beb-953e-6e0180c6c945\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93c48708c135563d79026fd18dbd330523d6cce1
08cf7f871fb6553ea12e7025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://325d860516ab39c35065943a22c5723f6be24442fa42871dd750aaca6f6e4c64\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T00:07:39Z\\\",\\\"message\\\":\\\" for Pod openshift-etcd/etcd-crc in node crc\\\\nI1126 00:07:39.637084 6273 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:61897e97-c771-4738-8709-09636387cb00}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1126 00:07:39.637120 6273 obj_retry.go:386] Retry successful for *v1.Pod openshift-etcd/etcd-crc after 0 failed attempt(s)\\\\nI1126 00:07:39.637132 6273 default_network_controller.go:776] Recording success event on pod openshift-etcd/etcd-crc\\\\nF1126 00:07:39.637149 6273 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post 
\\\\\\\"https://127.0.0.1:9743/no\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:38Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"init
ContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hh8lx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:54Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.934006 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.934046 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.934056 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.934070 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.934079 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:54Z","lastTransitionTime":"2025-11-26T00:07:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.937301 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tvsgw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be016325-bd90-4293-9d92-bc8cfba8a463\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fecdc3efe4f0230352d90992e44266359ca7129f3654b43186f6e81bbe72ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97pr5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tvsgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:54Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.950077 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6e20b72-5757-4560-ab3e-61ad3a451be8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b34767b95b984666c42ed51106c7d43766e36ce3f5f4e743d8a40451536f82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dc301396f467d1c7d0af549fa7bf7daf1542a2aa87bf3ece820ce8e21a933d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9641d3d437fd7125bbf21d140d2e6853aed997e53f3f6be8133ce94c0d4a4653\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5086422d5107d7bba0da4b6f9b3e75966b3ee5833d93e8f5baa87e59f99e4578\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:54Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.961650 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:54Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.971116 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:54Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.981126 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jhkp7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b640f09-f96d-4197-97be-f50e211e484d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wmjfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wmjfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:41Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jhkp7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:54Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:54 crc kubenswrapper[4875]: I1126 00:07:54.998729 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c771ff97-2ac6-43b4-976e-06fb4909c83b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a0c30f49af86d8affd5039728fe2adf22324b93d617dd5c63dfc73aed0ed70e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f839a108e04ab5f68757b2f5661fe0643d88bbd33493cc800d9c6826d0e69c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef02df42efda33d3f123cfe925bb0d1adab00fecbfd8711962c4bdf458898e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7650f30df5f9f908d3dc068e88e04892dce7a8
8f63ecc6a52901a36ba7bed5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aac38d8e30fa7e53d316dd4092f7210c7976bd44f77d2a21ab0ed53e1f6c69f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:54Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.010135 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0dde34d63f530305d33e43053c6475f1e1a5a794722700a85758a4d3d3ef5ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:55Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.032609 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5231f747090cf07f92f8f56d3c43191b581098758208e6901b28de04113ab0c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:55Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.036545 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.036577 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.036588 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.036603 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.036613 4875 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:55Z","lastTransitionTime":"2025-11-26T00:07:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.050711 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a19f36bfb94e5e0000f6e930c5160e799d4a3e334d2b119dcc6879fac1a4a98e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9dfa9b8751b0d639d299539f76b3a57c46b730d3960851344162019cb4843f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:55Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.074482 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hjshz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e5a9914-f410-4b51-8e16-77e18f7b09fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a43439ddb6f096b37261ab045b6a3ed923eefecb9c3122b00a87517605d0aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mm8dm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hjshz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:55Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.090579 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8wvqp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e107494e5b30ef73730bc20a4b4ae86c83caf1590f6a8044d1892e424e1559a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22mhs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8wvqp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:55Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.138590 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.138623 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.138634 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.138648 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.138657 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:55Z","lastTransitionTime":"2025-11-26T00:07:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.240686 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.240720 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.240729 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.240742 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.240761 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:55Z","lastTransitionTime":"2025-11-26T00:07:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.343505 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.343539 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.343549 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.343563 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.343573 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:55Z","lastTransitionTime":"2025-11-26T00:07:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.445075 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.445101 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.445110 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.445124 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.445135 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:55Z","lastTransitionTime":"2025-11-26T00:07:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.528653 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.528695 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.528751 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:07:55 crc kubenswrapper[4875]: E1126 00:07:55.528852 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:07:55 crc kubenswrapper[4875]: E1126 00:07:55.528969 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:07:55 crc kubenswrapper[4875]: E1126 00:07:55.529052 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.547231 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.547281 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.547297 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.547318 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.547341 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:55Z","lastTransitionTime":"2025-11-26T00:07:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.649862 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.649899 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.649911 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.649934 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.649946 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:55Z","lastTransitionTime":"2025-11-26T00:07:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.751970 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.752021 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.752033 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.752050 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.752063 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:55Z","lastTransitionTime":"2025-11-26T00:07:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.833267 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hh8lx_8fe08e0b-f339-4beb-953e-6e0180c6c945/ovnkube-controller/2.log" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.834250 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hh8lx_8fe08e0b-f339-4beb-953e-6e0180c6c945/ovnkube-controller/1.log" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.837146 4875 generic.go:334] "Generic (PLEG): container finished" podID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerID="93c48708c135563d79026fd18dbd330523d6cce108cf7f871fb6553ea12e7025" exitCode=1 Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.837179 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" event={"ID":"8fe08e0b-f339-4beb-953e-6e0180c6c945","Type":"ContainerDied","Data":"93c48708c135563d79026fd18dbd330523d6cce108cf7f871fb6553ea12e7025"} Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.837209 4875 scope.go:117] "RemoveContainer" containerID="325d860516ab39c35065943a22c5723f6be24442fa42871dd750aaca6f6e4c64" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.838342 4875 scope.go:117] "RemoveContainer" containerID="93c48708c135563d79026fd18dbd330523d6cce108cf7f871fb6553ea12e7025" Nov 26 00:07:55 crc kubenswrapper[4875]: E1126 00:07:55.838689 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-hh8lx_openshift-ovn-kubernetes(8fe08e0b-f339-4beb-953e-6e0180c6c945)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.854804 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.854872 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.854885 4875 kubelet_node_status.go:724] "Recording event message for node" 
node="crc" event="NodeHasSufficientPID" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.854923 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.854935 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:55Z","lastTransitionTime":"2025-11-26T00:07:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.859846 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c771ff97-2ac6-43b4-976e-06fb4909c83b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a0c30f49af86d8affd5039728fe2adf22324b93d617dd5c63dfc73aed0ed70e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f839a108e04ab5f68757b2f5661fe0643d88bbd33493cc800d9c6826d0e69c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/
lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef02df42efda33d3f123cfe925bb0d1adab00fecbfd8711962c4bdf458898e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7650f30df5f9f908d3dc068e88e04892dce7a88f63ecc6a52901a36ba7bed5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aac38d8e30fa7e53d316dd4092f7210c7976bd44f77d2a21ab0ed53e1f6c69f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"conta
inerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:55Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.874000 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0dde34d63f530305d33e43053c6475f1e1a5a794722700a85758a4d3d3ef5ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:55Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.885611 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5231f747090cf07f92f8f56d3c43191b581098758208e6901b28de04113ab0c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:55Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.902369 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a19f36bfb94e5e0000f6e930c5160e799d4a3e334d2b119dcc6879fac1a4a98e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9dfa9b8751b0d639d299539f76b3a57c46b730d3960851344162019cb4843f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:55Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.914266 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hjshz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e5a9914-f410-4b51-8e16-77e18f7b09fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a43439ddb6f096b37261ab045b6a3ed923eefecb9c3122b00a87517605d0aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mm8dm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hjshz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:55Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.926388 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8wvqp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e107494e5b30ef73730bc20a4b4ae86c83caf1590f6a8044d1892e424e1559a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22mhs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8wvqp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:55Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.936987 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://546875ee0f2f4855eb47134061708606927a379fcc7e1c3492cffcc519bbb542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdf274332d9e071cabf1993c37531ebc66300c965dd736a4795252168efc371\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-9mztb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:55Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.950685 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02732a7d-257e-4d74-8a08-cea35074fc4e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab59be8e3f5b8bfceb2d6bdb642d03f2049be71e3acd91d7b5dd155e471f7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\
",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"
containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qvrsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:55Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.957206 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.957256 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.957265 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.957279 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.957304 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:55Z","lastTransitionTime":"2025-11-26T00:07:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.964242 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qhmvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"40b82738-34d2-4c2f-99c3-ae1198e964dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c52a486d839058df608b79c0f2e01f157347ddefbcefdda59cb44b073c1f776c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6mk2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\
"containerID\\\":\\\"cri-o://b83a4af8fd03aab54351d9dc3f4b0f9a203aab768023dd1a416361d746473cdb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6mk2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:40Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qhmvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:55Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.980363 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c8a175-7cc7-4e45-8e37-d431994e5526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T00:07:26Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1126 00:07:11.241353 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 00:07:11.243535 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-775517230/tls.crt::/tmp/serving-cert-775517230/tls.key\\\\\\\"\\\\nI1126 00:07:26.715442 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 00:07:26.718387 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 00:07:26.718433 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 00:07:26.718455 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 00:07:26.718462 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 00:07:26.727473 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 00:07:26.727505 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727510 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727513 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 00:07:26.727516 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 00:07:26.727520 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 00:07:26.727524 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 00:07:26.727732 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 00:07:26.730962 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:55Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:55 crc kubenswrapper[4875]: I1126 00:07:55.996134 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:55Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.013778 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe08e0b-f339-4beb-953e-6e0180c6c945\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93c48708c135563d79026fd18dbd330523d6cce1
08cf7f871fb6553ea12e7025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://325d860516ab39c35065943a22c5723f6be24442fa42871dd750aaca6f6e4c64\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T00:07:39Z\\\",\\\"message\\\":\\\" for Pod openshift-etcd/etcd-crc in node crc\\\\nI1126 00:07:39.637084 6273 model_client.go:398] Mutate operations generated as: [{Op:mutate Table:Port_Group Row:map[] Rows:[] Columns:[] Mutations:[{Column:ports Mutator:insert Value:{GoSet:[{GoUUID:61897e97-c771-4738-8709-09636387cb00}]}}] Timeout:\\\\u003cnil\\\\u003e Where:[where column _uuid == {c02bd945-d57b-49ff-9cd3-202ed3574b26}] Until: Durable:\\\\u003cnil\\\\u003e Comment:\\\\u003cnil\\\\u003e Lock:\\\\u003cnil\\\\u003e UUID: UUIDName:}]\\\\nI1126 00:07:39.637120 6273 obj_retry.go:386] Retry successful for *v1.Pod openshift-etcd/etcd-crc after 0 failed attempt(s)\\\\nI1126 00:07:39.637132 6273 default_network_controller.go:776] Recording success event on pod openshift-etcd/etcd-crc\\\\nF1126 00:07:39.637149 6273 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create admin network policy controller, err: could not add Event Handler for anpInformer during admin network policy controller initialization, handler {0x1fcc6e0 0x1fcc3c0 0x1fcc360} was not added to shared informer because it has stopped already, failed to start node network controller: failed to start default node network controller: failed to set node crc annotations: Internal error occurred: failed calling webhook \\\\\\\"node.network-node-identity.openshift.io\\\\\\\": failed to call webhook: Post \\\\\\\"https://127.0.0.1:9743/no\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:38Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93c48708c135563d79026fd18dbd330523d6cce108cf7f871fb6553ea12e7025\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T00:07:55Z\\\",\\\"message\\\":\\\"uster\\\\\\\", UUID:\\\\\\\"2a3fb1a3-a476-4e14-bcf5-fb79af60206a\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-service-ca-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, 
Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.40\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1126 00:07:55.341064 6494 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 00:07:55.341125 6494 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:54Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-lo
g\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hh8lx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:56Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.027334 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tvsgw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"be016325-bd90-4293-9d92-bc8cfba8a463\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fecdc3efe4f0230352d90992e44266359ca7129f3654b43186f6e81bbe72ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97pr5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tvsgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:56Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.042263 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6e20b72-5757-4560-ab3e-61ad3a451be8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b34767b95b984666c42ed51106c7d43766e36ce3f5f4e743d8a40451536f82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dc301396f467d1c7d0af549fa7bf7daf1542a2aa87bf3ece820ce8e21a933d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9641d3d437fd7125bbf21d140d2e6853aed997e53f3f6be8133ce94c0d4a4653\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5086422d5107d7bba0da4b6f9b3e75966b3ee5833d93e8f5baa87e59f99e4578\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:56Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.054846 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:56Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.060343 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.060397 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.060428 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.060452 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.060466 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:56Z","lastTransitionTime":"2025-11-26T00:07:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.066391 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:56Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.081670 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jhkp7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b640f09-f96d-4197-97be-f50e211e484d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"message\\\":\\\"containers with unready 
status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wmjfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wmjfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:41Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jhkp7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:56Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.163479 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.163536 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.163547 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.163565 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.163575 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:56Z","lastTransitionTime":"2025-11-26T00:07:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.266434 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.266511 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.266521 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.266539 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.266550 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:56Z","lastTransitionTime":"2025-11-26T00:07:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.369109 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.369142 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.369150 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.369162 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.369173 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:56Z","lastTransitionTime":"2025-11-26T00:07:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.471564 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.471813 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.471889 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.471953 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.472019 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:56Z","lastTransitionTime":"2025-11-26T00:07:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.528433 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:07:56 crc kubenswrapper[4875]: E1126 00:07:56.528602 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.574666 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.574735 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.574750 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.574775 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.574793 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:56Z","lastTransitionTime":"2025-11-26T00:07:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.678806 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.678876 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.678894 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.678926 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.679129 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:56Z","lastTransitionTime":"2025-11-26T00:07:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.782710 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.782795 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.782815 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.782843 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.782857 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:56Z","lastTransitionTime":"2025-11-26T00:07:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.843360 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hh8lx_8fe08e0b-f339-4beb-953e-6e0180c6c945/ovnkube-controller/2.log" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.846845 4875 scope.go:117] "RemoveContainer" containerID="93c48708c135563d79026fd18dbd330523d6cce108cf7f871fb6553ea12e7025" Nov 26 00:07:56 crc kubenswrapper[4875]: E1126 00:07:56.846995 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-hh8lx_openshift-ovn-kubernetes(8fe08e0b-f339-4beb-953e-6e0180c6c945)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.860656 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:56Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.872360 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jhkp7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b640f09-f96d-4197-97be-f50e211e484d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wmjfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wmjfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:41Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jhkp7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:56Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.884990 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6e20b72-5757-4560-ab3e-61ad3a451be8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b34767b95b984666c42ed51106c7d43766e36ce3f5f4e743d8a40451536f82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dc301396f467d1c7d0af549fa7bf7daf1542a2aa87bf3ece820ce8e21a933d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9641d3d437fd7125bbf21d140d2e6853aed997e53f3f6be8133ce94c0d4a4653\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5086422d5107d7bba0da4b6f9b3e75966b3ee5833d93e8f5baa87e59f99e4578\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:56Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.886059 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.886217 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.886321 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.886440 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.886568 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:56Z","lastTransitionTime":"2025-11-26T00:07:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.897907 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:56Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.917616 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a19f36bfb94e5e0000f6e930c5160e799d4a3e334d2b119dcc6879fac1a4a98e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9dfa9b8751b0d639d299539f76b3a57c46b730d3960851344162019cb4843f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:56Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.932771 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hjshz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e5a9914-f410-4b51-8e16-77e18f7b09fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a43439ddb6f096b37261ab045b6a3ed923eefecb9c3122b00a87517605d0aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mm8dm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hjshz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:56Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.949816 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8wvqp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e107494e5b30ef73730bc20a4b4ae86c83caf1590f6a8044d1892e424e1559a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22mhs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8wvqp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:56Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.972533 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c771ff97-2ac6-43b4-976e-06fb4909c83b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a0c30f49af86d8affd5039728fe2adf22324b93d617dd5c63dfc73aed0ed70e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f839a108e04ab5f68757b2f5661fe0643d88bbd33493cc800d9c6826d0e69c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef02df42efda33d3f123cfe925bb0d1adab00fecbfd8711962c4bdf458898e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"la
stState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7650f30df5f9f908d3dc068e88e04892dce7a88f63ecc6a52901a36ba7bed5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aac38d8e30fa7e53d316dd4092f7210c7976bd44f77d2a21ab0ed53e1f6c69f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:56Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.990792 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.991046 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.991208 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.991366 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.991556 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:56Z","lastTransitionTime":"2025-11-26T00:07:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:56 crc kubenswrapper[4875]: I1126 00:07:56.995827 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0dde34d63f530305d33e43053c6475f1e1a5a794722700a85758a4d3d3ef5ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:56Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.017395 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5231f747090cf07f92f8f56d3c43191b581098758208e6901b28de04113ab0c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:57Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.029603 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://546875ee0f2f4855eb47134061708606927a379fcc7e1c3492cffcc519bbb542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdf274332d9e071cabf1993c37531ebc66300c965dd736a4795252168efc371\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9mztb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:57Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.046700 4875 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02732a7d-257e-4d74-8a08-cea35074fc4e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab59be8e3f5b8bfceb2d6bdb642d03f2049be71e3acd91d7b5dd155e471f7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qvrsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:57Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.059296 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qhmvp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"40b82738-34d2-4c2f-99c3-ae1198e964dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c52a486d839058df608b79c0f2e01f157347ddefbcefdda59cb44b073c1f776c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6mk2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b83a4af8fd03aab54351d9dc3f4b0f9a203aab768023dd1a416361d746473cdb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6mk2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:40Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qhmvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:57Z is after 2025-08-24T17:21:41Z" Nov 26 
00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.077257 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe08e0b-f339-4beb-953e-6e0180c6c945\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d08169a9f9aced64cc63aba5103ff
1c1163b59299cb5df5ac4dab9b2a1ec51a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha
256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93c48708c135563d79026fd18dbd330523d6cce108cf7f871fb6553ea12e7025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93c48708c135563d79026fd18dbd330523d6cce108cf7f871fb6553ea12e7025\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T00:07:55Z\\\",\\\"message\\\":\\\"uster\\\\\\\", UUID:\\\\\\\"2a3fb1a3-a476-4e14-bcf5-fb79af60206a\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-service-ca-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.40\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1126 00:07:55.341064 6494 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 00:07:55.341125 6494 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:54Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-hh8lx_openshift-ovn-kubernetes(8fe08e0b-f339-4beb-953e-6e0180c6c945)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recurs
iveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hh8lx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:57Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.087686 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tvsgw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"be016325-bd90-4293-9d92-bc8cfba8a463\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fecdc3efe4f0230352d90992e44266359ca7129f3654b43186f6e81bbe72ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97pr5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tvsgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:57Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.093940 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.093999 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.094008 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.094030 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.094042 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:57Z","lastTransitionTime":"2025-11-26T00:07:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.100870 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c8a175-7cc7-4e45-8e37-d431994e5526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volu
meMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T00:07:26Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1126 00:07:11.241353 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 00:07:11.243535 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-775517230/tls.crt::/tmp/serving-cert-775517230/tls.key\\\\\\\"\\\\nI1126 00:07:26.715442 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 00:07:26.718387 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 00:07:26.718433 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 00:07:26.718455 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 00:07:26.718462 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 00:07:26.727473 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 00:07:26.727505 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727510 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727513 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 00:07:26.727516 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 00:07:26.727520 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 00:07:26.727524 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 00:07:26.727732 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 00:07:26.730962 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:57Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.114153 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:57Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.196932 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.196991 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.197002 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.197020 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.197029 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:57Z","lastTransitionTime":"2025-11-26T00:07:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.300262 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.300350 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.300374 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.300448 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.300486 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:57Z","lastTransitionTime":"2025-11-26T00:07:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.402965 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.403009 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.403020 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.403038 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.403048 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:57Z","lastTransitionTime":"2025-11-26T00:07:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.447698 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3b640f09-f96d-4197-97be-f50e211e484d-metrics-certs\") pod \"network-metrics-daemon-jhkp7\" (UID: \"3b640f09-f96d-4197-97be-f50e211e484d\") " pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:07:57 crc kubenswrapper[4875]: E1126 00:07:57.447911 4875 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 00:07:57 crc kubenswrapper[4875]: E1126 00:07:57.448025 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3b640f09-f96d-4197-97be-f50e211e484d-metrics-certs podName:3b640f09-f96d-4197-97be-f50e211e484d nodeName:}" failed. No retries permitted until 2025-11-26 00:08:13.447997258 +0000 UTC m=+66.637972953 (durationBeforeRetry 16s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3b640f09-f96d-4197-97be-f50e211e484d-metrics-certs") pod "network-metrics-daemon-jhkp7" (UID: "3b640f09-f96d-4197-97be-f50e211e484d") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.506008 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.506083 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.506102 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.506129 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.506149 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:57Z","lastTransitionTime":"2025-11-26T00:07:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.528053 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.528205 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.528083 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:07:57 crc kubenswrapper[4875]: E1126 00:07:57.528271 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:07:57 crc kubenswrapper[4875]: E1126 00:07:57.528488 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:07:57 crc kubenswrapper[4875]: E1126 00:07:57.528644 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.546166 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://546875ee0f2f4855eb47134061708606927a379fcc7e1c3492cffcc519bbb542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdf274332d9e071cabf1993c37531ebc66300c965dd736a4795252168efc371\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9mztb\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:57Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.572805 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02732a7d-257e-4d74-8a08-cea35074fc4e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab59be8e3f5b8bfceb2d6bdb642d03f2049be71e3acd91d7b5dd155e471f7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-11-26T00:07:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qvrsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-26T00:07:57Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.591848 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qhmvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"40b82738-34d2-4c2f-99c3-ae1198e964dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c52a486d839058df608b79c0f2e01f157347ddefbcefdda59cb44b073c1f776c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6mk2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b83a4af8fd03aab54351d9dc3f4b0f9a203aab768023dd1a416361d746473cdb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6mk2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:40Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qhmvp\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:57Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.609354 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.609404 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.609433 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.609450 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.609460 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:57Z","lastTransitionTime":"2025-11-26T00:07:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.617511 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c8a175-7cc7-4e45-8e37-d431994e5526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe78
5cec8d304ac31c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T00:07:26Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1126 00:07:11.241353 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 00:07:11.243535 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-775517230/tls.crt::/tmp/serving-cert-775517230/tls.key\\\\\\\"\\\\nI1126 00:07:26.715442 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 00:07:26.718387 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 00:07:26.718433 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 00:07:26.718455 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 00:07:26.718462 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 00:07:26.727473 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 00:07:26.727505 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727510 1 secure_serving.go:69] Use of insecure cipher 
'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727513 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 00:07:26.727516 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 00:07:26.727520 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 00:07:26.727524 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 00:07:26.727732 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 00:07:26.730962 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:57Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.637775 4875 status_manager.go:875] "Failed to update status 
for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:57Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.665915 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe08e0b-f339-4beb-953e-6e0180c6c945\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93c48708c135563d79026fd18dbd330523d6cce1
08cf7f871fb6553ea12e7025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93c48708c135563d79026fd18dbd330523d6cce108cf7f871fb6553ea12e7025\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T00:07:55Z\\\",\\\"message\\\":\\\"uster\\\\\\\", UUID:\\\\\\\"2a3fb1a3-a476-4e14-bcf5-fb79af60206a\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-service-ca-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.40\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1126 00:07:55.341064 6494 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 00:07:55.341125 6494 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:54Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-hh8lx_openshift-ovn-kubernetes(8fe08e0b-f339-4beb-953e-6e0180c6c945)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hh8lx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:57Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.686623 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tvsgw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be016325-bd90-4293-9d92-bc8cfba8a463\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fecdc3efe4f0230352d90992e44266359ca7129f3654b43186f6e81bbe72ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97pr5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tvsgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:57Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.710060 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6e20b72-5757-4560-ab3e-61ad3a451be8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b34767b95b984666c42ed51106c7d43766e36ce3f5f4e743d8a40451536f82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dc301396f467d1c7d0af549fa7bf7daf1542a2aa87bf3ece820ce8e21a933d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9641d3d437fd7125bbf21d140d2e6853aed997e53f3f6be8133ce94c0d4a4653\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-
manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5086422d5107d7bba0da4b6f9b3e75966b3ee5833d93e8f5baa87e59f99e4578\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:57Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.711530 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.711566 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.711575 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.711591 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.711603 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:57Z","lastTransitionTime":"2025-11-26T00:07:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.724760 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:57Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.739120 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:57Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.751205 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jhkp7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b640f09-f96d-4197-97be-f50e211e484d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wmjfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wmjfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:41Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jhkp7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:57Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.767294 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c771ff97-2ac6-43b4-976e-06fb4909c83b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a0c30f49af86d8affd5039728fe2adf22324b93d617dd5c63dfc73aed0ed70e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f839a108e04ab5f68757b2f5661fe0643d88bbd33493cc800d9c6826d0e69c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef02df42efda33d3f123cfe925bb0d1adab00fecbfd8711962c4bdf458898e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7650f30df5f9f908d3dc068e88e04892dce7a8
8f63ecc6a52901a36ba7bed5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aac38d8e30fa7e53d316dd4092f7210c7976bd44f77d2a21ab0ed53e1f6c69f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:57Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.778319 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0dde34d63f530305d33e43053c6475f1e1a5a794722700a85758a4d3d3ef5ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:57Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.789802 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5231f747090cf07f92f8f56d3c43191b581098758208e6901b28de04113ab0c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:57Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.802060 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a19f36bfb94e5e0000f6e930c5160e799d4a3e334d2b119dcc6879fac1a4a98e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9dfa9b8751b0d639d299539f76b3a57c46b730d3960851344162019cb4843f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:57Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.811392 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hjshz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e5a9914-f410-4b51-8e16-77e18f7b09fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a43439ddb6f096b37261ab045b6a3ed923eefecb9c3122b00a87517605d0aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mm8dm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hjshz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:57Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.813002 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.813054 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.813065 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.813081 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.813091 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:57Z","lastTransitionTime":"2025-11-26T00:07:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: 
no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.824363 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8wvqp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e107494e5b30ef73730bc20a4b4ae86c83caf1590f6a8044d1892e424e1559a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22mhs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\
\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8wvqp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:07:57Z is after 2025-08-24T17:21:41Z" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.914815 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.914846 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.914857 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.914871 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:57 crc kubenswrapper[4875]: I1126 00:07:57.914881 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:57Z","lastTransitionTime":"2025-11-26T00:07:57Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.017496 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.017644 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.017669 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.017722 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.017737 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:58Z","lastTransitionTime":"2025-11-26T00:07:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.120091 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.120130 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.120142 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.120157 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.120167 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:58Z","lastTransitionTime":"2025-11-26T00:07:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.222305 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.222345 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.222356 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.222373 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.222385 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:58Z","lastTransitionTime":"2025-11-26T00:07:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.325151 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.325189 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.325199 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.325212 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.325222 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:58Z","lastTransitionTime":"2025-11-26T00:07:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.428181 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.428237 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.428249 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.428268 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.428281 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:58Z","lastTransitionTime":"2025-11-26T00:07:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.528286 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:07:58 crc kubenswrapper[4875]: E1126 00:07:58.528482 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.529952 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.529985 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.529995 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.530007 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.530019 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:58Z","lastTransitionTime":"2025-11-26T00:07:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.631956 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.631989 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.631998 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.632011 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.632019 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:58Z","lastTransitionTime":"2025-11-26T00:07:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.734961 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.735003 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.735012 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.735026 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.735038 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:58Z","lastTransitionTime":"2025-11-26T00:07:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.838248 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.838309 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.838326 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.838349 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.838367 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:58Z","lastTransitionTime":"2025-11-26T00:07:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.940898 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.940981 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.941006 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.941036 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:58 crc kubenswrapper[4875]: I1126 00:07:58.941063 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:58Z","lastTransitionTime":"2025-11-26T00:07:58Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.043931 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.044463 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.044490 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.044518 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.044542 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:59Z","lastTransitionTime":"2025-11-26T00:07:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.146605 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.146657 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.146666 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.146681 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.146690 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:59Z","lastTransitionTime":"2025-11-26T00:07:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.167332 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:07:59 crc kubenswrapper[4875]: E1126 00:07:59.167485 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:08:31.167463632 +0000 UTC m=+84.357439327 (durationBeforeRetry 32s). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.249550 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.249608 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.249618 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.249635 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.249646 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:59Z","lastTransitionTime":"2025-11-26T00:07:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.268015 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.268067 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.268097 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.268137 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:07:59 crc kubenswrapper[4875]: E1126 00:07:59.268229 4875 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 00:07:59 crc kubenswrapper[4875]: E1126 00:07:59.268252 4875 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 00:07:59 crc kubenswrapper[4875]: E1126 00:07:59.268303 4875 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 00:07:59 crc kubenswrapper[4875]: E1126 00:07:59.268314 4875 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 00:07:59 crc kubenswrapper[4875]: E1126 00:07:59.268344 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 00:08:31.268321997 +0000 UTC m=+84.458297712 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 00:07:59 crc kubenswrapper[4875]: E1126 00:07:59.268350 4875 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 00:07:59 crc kubenswrapper[4875]: E1126 00:07:59.268365 4875 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 00:07:59 crc kubenswrapper[4875]: E1126 00:07:59.268259 4875 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 00:07:59 crc kubenswrapper[4875]: E1126 00:07:59.268377 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 00:08:31.268358688 +0000 UTC m=+84.458334383 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 00:07:59 crc kubenswrapper[4875]: E1126 00:07:59.268387 4875 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 00:07:59 crc kubenswrapper[4875]: E1126 00:07:59.268447 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 00:08:31.26843196 +0000 UTC m=+84.458407655 (durationBeforeRetry 32s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 00:07:59 crc kubenswrapper[4875]: E1126 00:07:59.268464 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 00:08:31.26845721 +0000 UTC m=+84.458432905 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.353127 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.353179 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.353190 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.353209 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.353221 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:59Z","lastTransitionTime":"2025-11-26T00:07:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.456277 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.456328 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.456337 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.456352 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.456362 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:59Z","lastTransitionTime":"2025-11-26T00:07:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.529030 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:07:59 crc kubenswrapper[4875]: E1126 00:07:59.529188 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.529456 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.529502 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:07:59 crc kubenswrapper[4875]: E1126 00:07:59.529703 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:07:59 crc kubenswrapper[4875]: E1126 00:07:59.529866 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.558859 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.558907 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.558916 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.558932 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.558945 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:59Z","lastTransitionTime":"2025-11-26T00:07:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.661143 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.661184 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.661192 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.661217 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.661235 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:59Z","lastTransitionTime":"2025-11-26T00:07:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.763011 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.763046 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.763054 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.763066 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.763075 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:59Z","lastTransitionTime":"2025-11-26T00:07:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.865559 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.865591 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.865599 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.865612 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.865624 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:59Z","lastTransitionTime":"2025-11-26T00:07:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.968142 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.968182 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.968191 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.968211 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:07:59 crc kubenswrapper[4875]: I1126 00:07:59.968222 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:07:59Z","lastTransitionTime":"2025-11-26T00:07:59Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.070698 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.070751 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.070761 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.070777 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.070788 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:00Z","lastTransitionTime":"2025-11-26T00:08:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.177021 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.177099 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.177113 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.177135 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.177221 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:00Z","lastTransitionTime":"2025-11-26T00:08:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.279950 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.280016 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.280033 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.280054 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.280078 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:00Z","lastTransitionTime":"2025-11-26T00:08:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.385796 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.385872 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.385886 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.385910 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.385926 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:00Z","lastTransitionTime":"2025-11-26T00:08:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.489238 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.489293 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.489304 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.489317 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.489327 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:00Z","lastTransitionTime":"2025-11-26T00:08:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.528812 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:08:00 crc kubenswrapper[4875]: E1126 00:08:00.528983 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.592680 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.592738 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.592748 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.592765 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.592775 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:00Z","lastTransitionTime":"2025-11-26T00:08:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.695051 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.695094 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.695108 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.695128 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.695140 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:00Z","lastTransitionTime":"2025-11-26T00:08:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.797838 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.797904 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.797921 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.797946 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.797961 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:00Z","lastTransitionTime":"2025-11-26T00:08:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.899595 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.899638 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.899646 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.899661 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:00 crc kubenswrapper[4875]: I1126 00:08:00.899671 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:00Z","lastTransitionTime":"2025-11-26T00:08:00Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.001979 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.002023 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.002032 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.002047 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.002056 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:01Z","lastTransitionTime":"2025-11-26T00:08:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.104033 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.104062 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.104088 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.104103 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.104111 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:01Z","lastTransitionTime":"2025-11-26T00:08:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.206053 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.206098 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.206107 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.206119 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.206130 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:01Z","lastTransitionTime":"2025-11-26T00:08:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.308325 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.308372 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.308384 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.308401 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.308435 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:01Z","lastTransitionTime":"2025-11-26T00:08:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.410458 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.410491 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.410502 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.410515 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.410523 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:01Z","lastTransitionTime":"2025-11-26T00:08:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.512352 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.512394 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.512404 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.512442 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.512454 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:01Z","lastTransitionTime":"2025-11-26T00:08:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.528643 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.528643 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:08:01 crc kubenswrapper[4875]: E1126 00:08:01.528773 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:08:01 crc kubenswrapper[4875]: E1126 00:08:01.528828 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.528665 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:08:01 crc kubenswrapper[4875]: E1126 00:08:01.528907 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.614618 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.614657 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.614667 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.614682 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.614692 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:01Z","lastTransitionTime":"2025-11-26T00:08:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.717009 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.717075 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.717087 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.717102 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.717114 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:01Z","lastTransitionTime":"2025-11-26T00:08:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.819477 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.819512 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.819525 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.819541 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.819551 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:01Z","lastTransitionTime":"2025-11-26T00:08:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.921796 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.921879 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.921902 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.921932 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:01 crc kubenswrapper[4875]: I1126 00:08:01.921956 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:01Z","lastTransitionTime":"2025-11-26T00:08:01Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.024167 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.024238 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.024249 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.024263 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.024271 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:02Z","lastTransitionTime":"2025-11-26T00:08:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.126810 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.126844 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.126855 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.126871 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.126882 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:02Z","lastTransitionTime":"2025-11-26T00:08:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.229272 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.229326 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.229348 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.229367 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.229381 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:02Z","lastTransitionTime":"2025-11-26T00:08:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.332188 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.332249 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.332266 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.332291 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.332307 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:02Z","lastTransitionTime":"2025-11-26T00:08:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.434929 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.435012 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.435034 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.435067 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.435084 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:02Z","lastTransitionTime":"2025-11-26T00:08:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.528341 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:08:02 crc kubenswrapper[4875]: E1126 00:08:02.528584 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.537897 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.537952 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.537967 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.537989 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.538007 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:02Z","lastTransitionTime":"2025-11-26T00:08:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.640649 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.640704 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.640716 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.640734 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.640748 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:02Z","lastTransitionTime":"2025-11-26T00:08:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.744477 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.744523 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.744540 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.744563 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.744579 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:02Z","lastTransitionTime":"2025-11-26T00:08:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.850368 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.850433 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.850448 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.850464 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.850475 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:02Z","lastTransitionTime":"2025-11-26T00:08:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.952242 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.952284 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.952293 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.952307 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:02 crc kubenswrapper[4875]: I1126 00:08:02.952316 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:02Z","lastTransitionTime":"2025-11-26T00:08:02Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.054804 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.054852 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.054864 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.054882 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.054893 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:03Z","lastTransitionTime":"2025-11-26T00:08:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.156796 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.156828 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.156837 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.156851 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.156862 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:03Z","lastTransitionTime":"2025-11-26T00:08:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.260128 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.260204 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.260226 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.260256 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.260280 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:03Z","lastTransitionTime":"2025-11-26T00:08:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.364365 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.364447 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.364464 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.364485 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.364499 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:03Z","lastTransitionTime":"2025-11-26T00:08:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.468325 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.468397 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.468447 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.468475 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.468495 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:03Z","lastTransitionTime":"2025-11-26T00:08:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.528372 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.528400 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:08:03 crc kubenswrapper[4875]: E1126 00:08:03.528679 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.528783 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:08:03 crc kubenswrapper[4875]: E1126 00:08:03.529011 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:08:03 crc kubenswrapper[4875]: E1126 00:08:03.529155 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.571116 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.571191 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.571215 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.571246 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.571268 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:03Z","lastTransitionTime":"2025-11-26T00:08:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.673562 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.673631 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.673648 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.673675 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.673692 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:03Z","lastTransitionTime":"2025-11-26T00:08:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.700004 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.709387 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler/openshift-kube-scheduler-crc"] Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.738270 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c771ff97-2ac6-43b4-976e-06fb4909c83b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a0c30f49af86d8affd5039728fe2adf22324b93d617dd5c63dfc73aed0ed70e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f839a108e04ab5f68757b2f5661fe0643d88bbd33493cc800d9c6826d0e69c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef02df42efda33d3f123cfe925bb0d1adab00fecbfd8711962c4bdf458898e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"ima
geID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7650f30df5f9f908d3dc068e88e04892dce7a88f63ecc6a52901a36ba7bed5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aac38d8e30fa7e53d316dd4092f7210c7976bd44f77d2a21ab0ed53e1f6c69f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-d
ev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:03Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.755044 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0dde34d63f530305d33e43053c6475f1e1a5a794722700a85758a4d3d3ef5ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:03Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.765523 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5231f747090cf07f92f8f56d3c43191b581098758208e6901b28de04113ab0c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:03Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.776186 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.776236 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.776246 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.776260 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.776269 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:03Z","lastTransitionTime":"2025-11-26T00:08:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.778100 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a19f36bfb94e5e0000f6e930c5160e799d4a3e334d2b119dcc6879fac1a4a98e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9dfa9b8751b0d639d299539f76b3a57c46b730d3960851344162019cb4843f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:03Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.791519 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hjshz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e5a9914-f410-4b51-8e16-77e18f7b09fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a43439ddb6f096b37261ab045b6a3ed923eefecb9c3122b00a87517605d0aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mm8dm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hjshz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:03Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.803215 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8wvqp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e107494e5b30ef73730bc20a4b4ae86c83caf1590f6a8044d1892e424e1559a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22mhs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8wvqp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:03Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.817694 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://546875ee0f2f4855eb47134061708606927a379fcc7e1c3492cffcc519bbb542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdf274332d9e071cabf1993c37531ebc66300c965dd736a4795252168efc371\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod 
\"openshift-machine-config-operator\"/\"machine-config-daemon-9mztb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:03Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.832848 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02732a7d-257e-4d74-8a08-cea35074fc4e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab59be8e3f5b8bfceb2d6bdb642d03f2049be71e3acd91d7b5dd155e471f7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\
",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"
containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qvrsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to 
call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:03Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.846001 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qhmvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"40b82738-34d2-4c2f-99c3-ae1198e964dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c52a486d839058df608b79c0f2e01f157347ddefbcefdda59cb44b073c1f776c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6mk2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b83a4af8fd03aab54351d9dc3f4b0f9a203aab768023dd1a416361d746473cdb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6mk2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\
\"2025-11-26T00:07:40Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qhmvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:03Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.865302 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c8a175-7cc7-4e45-8e37-d431994e5526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c9871
17ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T00:07:26Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1126 00:07:11.241353 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 00:07:11.243535 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-775517230/tls.crt::/tmp/serving-cert-775517230/tls.key\\\\\\\"\\\\nI1126 00:07:26.715442 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 00:07:26.718387 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 00:07:26.718433 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 00:07:26.718455 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 00:07:26.718462 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 00:07:26.727473 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 00:07:26.727505 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727510 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727513 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 00:07:26.727516 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 00:07:26.727520 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 00:07:26.727524 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 00:07:26.727732 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 00:07:26.730962 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:03Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.878312 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.878385 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.878442 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.878479 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.878505 4875 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:03Z","lastTransitionTime":"2025-11-26T00:08:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.879894 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:03Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.906879 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe08e0b-f339-4beb-953e-6e0180c6c945\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93c48708c135563d79026fd18dbd330523d6cce1
08cf7f871fb6553ea12e7025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93c48708c135563d79026fd18dbd330523d6cce108cf7f871fb6553ea12e7025\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T00:07:55Z\\\",\\\"message\\\":\\\"uster\\\\\\\", UUID:\\\\\\\"2a3fb1a3-a476-4e14-bcf5-fb79af60206a\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-service-ca-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.40\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1126 00:07:55.341064 6494 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 00:07:55.341125 6494 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:54Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-hh8lx_openshift-ovn-kubernetes(8fe08e0b-f339-4beb-953e-6e0180c6c945)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hh8lx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:03Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.921539 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tvsgw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be016325-bd90-4293-9d92-bc8cfba8a463\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fecdc3efe4f0230352d90992e44266359ca7129f3654b43186f6e81bbe72ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97pr5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tvsgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:03Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.936398 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6e20b72-5757-4560-ab3e-61ad3a451be8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b34767b95b984666c42ed51106c7d43766e36ce3f5f4e743d8a40451536f82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dc301396f467d1c7d0af549fa7bf7daf1542a2aa87bf3ece820ce8e21a933d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9641d3d437fd7125bbf21d140d2e6853aed997e53f3f6be8133ce94c0d4a4653\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-
manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5086422d5107d7bba0da4b6f9b3e75966b3ee5833d93e8f5baa87e59f99e4578\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:03Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.955049 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:03Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.972749 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:03Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.980979 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.981005 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.981016 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.981031 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.981040 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:03Z","lastTransitionTime":"2025-11-26T00:08:03Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:03 crc kubenswrapper[4875]: I1126 00:08:03.985775 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jhkp7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b640f09-f96d-4197-97be-f50e211e484d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wmjfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wmjfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:41Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jhkp7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:03Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.083358 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.083396 4875 
kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.083430 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.083447 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.083459 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:04Z","lastTransitionTime":"2025-11-26T00:08:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.186604 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.186652 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.186662 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.186679 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.186689 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:04Z","lastTransitionTime":"2025-11-26T00:08:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.290152 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.290229 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.290253 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.290288 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.290375 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:04Z","lastTransitionTime":"2025-11-26T00:08:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.393765 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.393855 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.393878 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.393905 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.393930 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:04Z","lastTransitionTime":"2025-11-26T00:08:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.496260 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.496291 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.496300 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.496312 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.496320 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:04Z","lastTransitionTime":"2025-11-26T00:08:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.528630 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:08:04 crc kubenswrapper[4875]: E1126 00:08:04.528786 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.599189 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.599245 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.599261 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.599282 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.599298 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:04Z","lastTransitionTime":"2025-11-26T00:08:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.702475 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.702529 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.702547 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.702567 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.702581 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:04Z","lastTransitionTime":"2025-11-26T00:08:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.718534 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.718686 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.718704 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.718718 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.718729 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:04Z","lastTransitionTime":"2025-11-26T00:08:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:04 crc kubenswrapper[4875]: E1126 00:08:04.732399 4875 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e98cdd5c-4f13-4403-b492-aac02592afb8\\\",\\\"systemUUID\\\":\\\"9f6d4e71-289a-40d7-bad1-b955dbea75de\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:04Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.736403 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.736501 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.736524 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.736551 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.736573 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:04Z","lastTransitionTime":"2025-11-26T00:08:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:04 crc kubenswrapper[4875]: E1126 00:08:04.750649 4875 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e98cdd5c-4f13-4403-b492-aac02592afb8\\\",\\\"systemUUID\\\":\\\"9f6d4e71-289a-40d7-bad1-b955dbea75de\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:04Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.754266 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.754326 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.754344 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.754364 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.754379 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:04Z","lastTransitionTime":"2025-11-26T00:08:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:04 crc kubenswrapper[4875]: E1126 00:08:04.767542 4875 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e98cdd5c-4f13-4403-b492-aac02592afb8\\\",\\\"systemUUID\\\":\\\"9f6d4e71-289a-40d7-bad1-b955dbea75de\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:04Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.772544 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.772610 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.772628 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.773098 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.773175 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:04Z","lastTransitionTime":"2025-11-26T00:08:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:04 crc kubenswrapper[4875]: E1126 00:08:04.794108 4875 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e98cdd5c-4f13-4403-b492-aac02592afb8\\\",\\\"systemUUID\\\":\\\"9f6d4e71-289a-40d7-bad1-b955dbea75de\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:04Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.799358 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.799397 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.799426 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.799442 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.799452 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:04Z","lastTransitionTime":"2025-11-26T00:08:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:04 crc kubenswrapper[4875]: E1126 00:08:04.818701 4875 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:04Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:04Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:04Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:04Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e98cdd5c-4f13-4403-b492-aac02592afb8\\\",\\\"systemUUID\\\":\\\"9f6d4e71-289a-40d7-bad1-b955dbea75de\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:04Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:04 crc kubenswrapper[4875]: E1126 00:08:04.818820 4875 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.820363 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.820386 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.820394 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.820422 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.820432 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:04Z","lastTransitionTime":"2025-11-26T00:08:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.922872 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.922911 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.922920 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.922953 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:04 crc kubenswrapper[4875]: I1126 00:08:04.922962 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:04Z","lastTransitionTime":"2025-11-26T00:08:04Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.025149 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.025203 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.025216 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.025240 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.025253 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:05Z","lastTransitionTime":"2025-11-26T00:08:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.127555 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.127597 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.127606 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.127621 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.127631 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:05Z","lastTransitionTime":"2025-11-26T00:08:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.230589 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.230631 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.230646 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.230663 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.230676 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:05Z","lastTransitionTime":"2025-11-26T00:08:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.334252 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.334310 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.334325 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.334348 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.334363 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:05Z","lastTransitionTime":"2025-11-26T00:08:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.438345 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.438446 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.438460 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.438483 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.438502 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:05Z","lastTransitionTime":"2025-11-26T00:08:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.528703 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.528731 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.528844 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:08:05 crc kubenswrapper[4875]: E1126 00:08:05.529032 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:08:05 crc kubenswrapper[4875]: E1126 00:08:05.529169 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:08:05 crc kubenswrapper[4875]: E1126 00:08:05.529428 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.540101 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.540139 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.540153 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.540169 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.540182 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:05Z","lastTransitionTime":"2025-11-26T00:08:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.642908 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.642957 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.642970 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.642986 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.642999 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:05Z","lastTransitionTime":"2025-11-26T00:08:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.745592 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.745630 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.745641 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.745655 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.745666 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:05Z","lastTransitionTime":"2025-11-26T00:08:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.848404 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.848473 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.848488 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.848510 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.848524 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:05Z","lastTransitionTime":"2025-11-26T00:08:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.952496 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.952544 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.952559 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.952577 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:05 crc kubenswrapper[4875]: I1126 00:08:05.952600 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:05Z","lastTransitionTime":"2025-11-26T00:08:05Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.054611 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.054654 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.054663 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.054678 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.054687 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:06Z","lastTransitionTime":"2025-11-26T00:08:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.157324 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.157358 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.157370 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.157384 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.157394 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:06Z","lastTransitionTime":"2025-11-26T00:08:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.259889 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.260139 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.260219 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.260297 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.260369 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:06Z","lastTransitionTime":"2025-11-26T00:08:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.362076 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.362119 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.362129 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.362146 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.362158 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:06Z","lastTransitionTime":"2025-11-26T00:08:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.464185 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.464226 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.464234 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.464248 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.464259 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:06Z","lastTransitionTime":"2025-11-26T00:08:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.528483 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:08:06 crc kubenswrapper[4875]: E1126 00:08:06.528665 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
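The NotReady condition recorded above reduces to one concrete check: the kubelet finds no CNI configuration under /etc/kubernetes/cni/net.d/, so it keeps the node's Ready condition False and skips sandbox creation for pods that need a network. Below is a minimal sketch of that check, assuming it is run directly on the affected node; the directory path comes from the log message itself, while the extension filter is only an assumption about common CNI config naming.

    # check_cni_conf.py -- sketch: report whether any CNI config files exist in the
    # directory named by the kubelet's NotReady message.
    import os

    CNI_DIR = "/etc/kubernetes/cni/net.d/"   # path quoted in the log entries above

    def list_cni_configs(path):
        """Return likely CNI config file names (.conf/.conflist/.json) in `path`."""
        try:
            entries = os.listdir(path)
        except FileNotFoundError:
            return []
        return sorted(e for e in entries if e.endswith((".conf", ".conflist", ".json")))

    if __name__ == "__main__":
        configs = list_cni_configs(CNI_DIR)
        if configs:
            print(f"{CNI_DIR}: {configs}")
        else:
            # Matches the logged condition: the network plugin (Multus/OVN-Kubernetes,
            # judging by the pods in this log) has not written its config yet.
            print(f"{CNI_DIR}: empty or missing - node stays NotReady until the plugin writes a config")

Once the plugin drops a config file there, the kubelet's NetworkReady check flips and the repeated NodeNotReady events stop.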
pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.567142 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.567184 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.567193 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.567208 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.567222 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:06Z","lastTransitionTime":"2025-11-26T00:08:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.670047 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.670128 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.670154 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.670188 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.670211 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:06Z","lastTransitionTime":"2025-11-26T00:08:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.772630 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.772669 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.772679 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.772695 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.772706 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:06Z","lastTransitionTime":"2025-11-26T00:08:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.878453 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.878510 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.878527 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.878543 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.878554 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:06Z","lastTransitionTime":"2025-11-26T00:08:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.982250 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.982328 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.982347 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.982378 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:06 crc kubenswrapper[4875]: I1126 00:08:06.982401 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:06Z","lastTransitionTime":"2025-11-26T00:08:06Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.086384 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.086684 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.086710 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.086734 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.086756 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:07Z","lastTransitionTime":"2025-11-26T00:08:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.188827 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.188868 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.188877 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.188910 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.188920 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:07Z","lastTransitionTime":"2025-11-26T00:08:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.293569 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.293681 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.293702 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.293728 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.293749 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:07Z","lastTransitionTime":"2025-11-26T00:08:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.395768 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.395810 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.395822 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.395840 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.395851 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:07Z","lastTransitionTime":"2025-11-26T00:08:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.499695 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.499734 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.499743 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.499760 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.499770 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:07Z","lastTransitionTime":"2025-11-26T00:08:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.529169 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.529186 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.529342 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:08:07 crc kubenswrapper[4875]: E1126 00:08:07.529482 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
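The same condition is re-recorded roughly every 100 ms, and the pods without a sandbox here (network-metrics-daemon-jhkp7, network-check-source, network-check-target, networking-console-plugin) are skipped with "network is not ready" until CNI comes up. When reading a capture like this one, it can help to pull out just the Ready-condition records. A small sketch follows, under two assumptions: the log has been saved with one journal entry per line (this archive wraps entries across physical lines), and the file name kubelet.log is hypothetical.

    # ready_transitions.py -- sketch: extract "Node became not ready" conditions from a
    # kubelet log and print when they were recorded and why.
    import json
    import re
    import sys

    # Entries look like: ... "Node became not ready" node="crc" condition={...JSON...}
    PATTERN = re.compile(r'"Node became not ready" node="(?P<node>[^"]+)" condition=(?P<cond>\{.*\})')

    def ready_transitions(path):
        with open(path, encoding="utf-8", errors="replace") as fh:
            for line in fh:
                m = PATTERN.search(line)
                if not m:
                    continue
                cond = json.loads(m.group("cond"))   # the condition is plain JSON in the entry
                yield m.group("node"), cond["lastHeartbeatTime"], cond["reason"], cond["message"]

    if __name__ == "__main__":
        path = sys.argv[1] if len(sys.argv) > 1 else "kubelet.log"
        for node, when, reason, message in ready_transitions(path):
            print(f"{when} {node} {reason}: {message.splitlines()[0]}")

In this capture the reason is always KubeletNotReady with the CNI message above, so the output mainly shows how long the node sat NotReady.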
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:08:07 crc kubenswrapper[4875]: E1126 00:08:07.529707 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:08:07 crc kubenswrapper[4875]: E1126 00:08:07.530341 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.555258 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:07Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.573335 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:07Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.583318 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jhkp7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b640f09-f96d-4197-97be-f50e211e484d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wmjfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wmjfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:41Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jhkp7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:07Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.594617 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6e20b72-5757-4560-ab3e-61ad3a451be8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b34767b95b984666c42ed51106c7d43766e36ce3f5f4e743d8a40451536f82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dc301396f467d1c7d0af549fa7bf7daf1542a2aa87bf3ece820ce8e21a933d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9641d3d437fd7125bbf21d140d2e6853aed997e53f3f6be8133ce94c0d4a4653\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5086422d5107d7bba0da4b6f9b3e75966b3ee5833d93e8f5baa87e59f99e4578\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:07Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.602209 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.602252 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.602266 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.602283 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.602296 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:07Z","lastTransitionTime":"2025-11-26T00:08:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
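Besides the CNI wait, every pod status patch in these entries is rejected because the pod.network-node-identity.openshift.io webhook at https://127.0.0.1:9743 presents a serving certificate that expired on 2025-08-24T17:21:41Z while the node clock reads 2025-11-26. A minimal probe of that endpoint is sketched below, assuming it is run on the node itself (the address is loopback-only) and using only the standard library; without the cluster CA bundle the handshake may report an unknown issuer rather than the expiry, so passing the webhook's CA file (path not shown in this log) reproduces the kubelet's failure exactly.

    # probe_webhook_cert.py -- sketch: attempt a verifying TLS handshake against the
    # webhook endpoint named in the "failed calling webhook" errors and report why
    # certificate verification fails.
    import socket
    import ssl
    import sys

    HOST, PORT = "127.0.0.1", 9743   # endpoint quoted in the status_manager errors

    def probe(host, port, cafile=None):
        ctx = ssl.create_default_context(cafile=cafile)   # verifies chain, expiry, hostname
        try:
            with socket.create_connection((host, port), timeout=5) as sock:
                with ctx.wrap_socket(sock, server_hostname=host):
                    return "handshake OK - certificate verified"
        except ssl.SSLCertVerificationError as exc:
            # Expected here, mirroring the kubelet: "certificate has expired"
            # (or an unknown-issuer message if no CA bundle was supplied).
            return f"verification failed: {exc.verify_message}"
        except OSError as exc:
            return f"connection failed: {exc}"

    if __name__ == "__main__":
        cafile = sys.argv[1] if len(sys.argv) > 1 else None
        print(f"{HOST}:{PORT} -> {probe(HOST, PORT, cafile)}")

The expiry is independent of the CNI issue above, so these patch failures will persist until the webhook's serving certificate is rotated.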
Has your network provider started?"} Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.605856 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0dde34d63f530305d33e43053c6475f1e1a5a794722700a85758a4d3d3ef5ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:07Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.616453 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5231f747090cf07f92f8f56d3c43191b581098758208e6901b28de04113ab0c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:07Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.628300 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a19f36bfb94e5e0000f6e930c5160e799d4a3e334d2b119dcc6879fac1a4a98e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9dfa9b8751b0d639d299539f76b3a57c46b730d3960851344162019cb4843f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:07Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.637270 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hjshz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e5a9914-f410-4b51-8e16-77e18f7b09fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a43439ddb6f096b37261ab045b6a3ed923eefecb9c3122b00a87517605d0aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mm8dm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hjshz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:07Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.647596 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8wvqp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e107494e5b30ef73730bc20a4b4ae86c83caf1590f6a8044d1892e424e1559a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22mhs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8wvqp\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:07Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.675316 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c771ff97-2ac6-43b4-976e-06fb4909c83b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a0c30f49af86d8affd5039728fe2adf22324b93d617dd5c63dfc73aed0ed70e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f839a108e04ab5f68757b2f5661fe0643d88bbd33493cc800d9c6826d0e69c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef02df42efda33d3f123cfe925bb0d1adab00fecbfd8711962c4bdf458898e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"la
stState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7650f30df5f9f908d3dc068e88e04892dce7a88f63ecc6a52901a36ba7bed5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aac38d8e30fa7e53d316dd4092f7210c7976bd44f77d2a21ab0ed53e1f6c69f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",
\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:07Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.689633 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"02732a7d-257e-4d74-8a08-cea35074fc4e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab59be8e3f5b8bfceb2d6bdb642d03f2049be71e3acd91d7b5dd155e471f7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c8
57df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/se
crets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qvrsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:07Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.699625 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qhmvp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"40b82738-34d2-4c2f-99c3-ae1198e964dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c52a486d839058df608b79c0f2e01f157347ddefbcefdda59cb44b073c1f776c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6mk2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b83a4af8fd03aab54351d9dc3f4b0f9a203aab768023dd1a416361d746473cdb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6mk2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:40Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qhmvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:07Z is after 2025-08-24T17:21:41Z" Nov 26 
00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.703837 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.703873 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.703891 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.703908 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.703920 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:07Z","lastTransitionTime":"2025-11-26T00:08:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.709701 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://546875ee0f2f4855eb47134061708606927a379fcc7e1c3492cffcc519bbb542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdf274332d9e071cabf1993c37531ebc66300c965dd736a4795252168efc371\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63
a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9mztb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:07Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.719770 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d127797-c509-4d81-950c-2e4bc7f82786\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ec22a9fda336e3f85bcbaf3c8fee2d1096b9f9eaede26d1246858bf393ad5b55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fffb96f456bc7cb372dceab2ad7310da04e2487d43530da595c2b865ec8e02a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f
35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11193f8290474fdb88f54086c6e42c01f4630df442b28819c4917165ef46a5cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34b0e5244d5a03f2b673959e7a5d9ec63a0d3a77dc9915cc1e1e0d73cdcfabee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34b0e5244d5a03f2b673959e7a5d9ec63a0d3a77dc9915cc1e1e0d73cdcfabee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:07Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.729954 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:07Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.746514 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe08e0b-f339-4beb-953e-6e0180c6c945\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93c48708c135563d79026fd18dbd330523d6cce1
08cf7f871fb6553ea12e7025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93c48708c135563d79026fd18dbd330523d6cce108cf7f871fb6553ea12e7025\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T00:07:55Z\\\",\\\"message\\\":\\\"uster\\\\\\\", UUID:\\\\\\\"2a3fb1a3-a476-4e14-bcf5-fb79af60206a\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-service-ca-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.40\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1126 00:07:55.341064 6494 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 00:07:55.341125 6494 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:54Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-hh8lx_openshift-ovn-kubernetes(8fe08e0b-f339-4beb-953e-6e0180c6c945)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hh8lx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:07Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.755480 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tvsgw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be016325-bd90-4293-9d92-bc8cfba8a463\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fecdc3efe4f0230352d90992e44266359ca7129f3654b43186f6e81bbe72ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97pr5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tvsgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:07Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.766663 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c8a175-7cc7-4e45-8e37-d431994e5526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-
apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T00:07:26Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1126 00:07:11.241353 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 00:07:11.243535 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-775517230/tls.crt::/tmp/serving-cert-775517230/tls.key\\\\\\\"\\\\nI1126 00:07:26.715442 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 00:07:26.718387 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 00:07:26.718433 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 00:07:26.718455 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 00:07:26.718462 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 00:07:26.727473 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 00:07:26.727505 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727510 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727513 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 00:07:26.727516 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 00:07:26.727520 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 00:07:26.727524 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 00:07:26.727732 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 00:07:26.730962 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:07Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.805631 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.805667 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.805678 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.805694 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.805706 4875 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:07Z","lastTransitionTime":"2025-11-26T00:08:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.910426 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.910488 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.910510 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.910535 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:07 crc kubenswrapper[4875]: I1126 00:08:07.910558 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:07Z","lastTransitionTime":"2025-11-26T00:08:07Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.013879 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.013955 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.013970 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.013987 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.014029 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:08Z","lastTransitionTime":"2025-11-26T00:08:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.117017 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.117096 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.117110 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.117127 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.117138 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:08Z","lastTransitionTime":"2025-11-26T00:08:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.220192 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.220248 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.220261 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.220279 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.220293 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:08Z","lastTransitionTime":"2025-11-26T00:08:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.323118 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.323161 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.323174 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.323190 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.323202 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:08Z","lastTransitionTime":"2025-11-26T00:08:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.425271 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.425315 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.425328 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.425344 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.425357 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:08Z","lastTransitionTime":"2025-11-26T00:08:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.527837 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.527869 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.527877 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.527891 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.527917 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:08Z","lastTransitionTime":"2025-11-26T00:08:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.527973 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:08:08 crc kubenswrapper[4875]: E1126 00:08:08.528091 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.630287 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.630323 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.630333 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.630346 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.630354 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:08Z","lastTransitionTime":"2025-11-26T00:08:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.733345 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.733377 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.733386 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.733399 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.733422 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:08Z","lastTransitionTime":"2025-11-26T00:08:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.834982 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.835017 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.835026 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.835061 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.835072 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:08Z","lastTransitionTime":"2025-11-26T00:08:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.937738 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.937774 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.937785 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.937800 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:08 crc kubenswrapper[4875]: I1126 00:08:08.937811 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:08Z","lastTransitionTime":"2025-11-26T00:08:08Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.040483 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.040510 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.040518 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.040532 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.040543 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:09Z","lastTransitionTime":"2025-11-26T00:08:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.142404 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.142486 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.142504 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.142524 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.142539 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:09Z","lastTransitionTime":"2025-11-26T00:08:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.245565 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.245609 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.245621 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.245637 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.245651 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:09Z","lastTransitionTime":"2025-11-26T00:08:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.348501 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.348554 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.348568 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.348585 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.348600 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:09Z","lastTransitionTime":"2025-11-26T00:08:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.450820 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.450865 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.450878 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.450910 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.450947 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:09Z","lastTransitionTime":"2025-11-26T00:08:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.528734 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.528779 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:08:09 crc kubenswrapper[4875]: E1126 00:08:09.528900 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.528957 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:08:09 crc kubenswrapper[4875]: E1126 00:08:09.529063 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:08:09 crc kubenswrapper[4875]: E1126 00:08:09.529127 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.553485 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.553556 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.553573 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.553597 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.553614 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:09Z","lastTransitionTime":"2025-11-26T00:08:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.655582 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.655629 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.655646 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.655671 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.655687 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:09Z","lastTransitionTime":"2025-11-26T00:08:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.757627 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.757660 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.757667 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.757681 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.757689 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:09Z","lastTransitionTime":"2025-11-26T00:08:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.860031 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.860068 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.860076 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.860090 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.860099 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:09Z","lastTransitionTime":"2025-11-26T00:08:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.962104 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.962146 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.962159 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.962176 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:09 crc kubenswrapper[4875]: I1126 00:08:09.962189 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:09Z","lastTransitionTime":"2025-11-26T00:08:09Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.064677 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.064729 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.064748 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.064770 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.064786 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:10Z","lastTransitionTime":"2025-11-26T00:08:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.166697 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.166744 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.166755 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.166777 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.166788 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:10Z","lastTransitionTime":"2025-11-26T00:08:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.269221 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.269643 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.269678 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.269714 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.269735 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:10Z","lastTransitionTime":"2025-11-26T00:08:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.372385 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.372438 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.372447 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.372462 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.372472 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:10Z","lastTransitionTime":"2025-11-26T00:08:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.474955 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.474992 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.475001 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.475015 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.475023 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:10Z","lastTransitionTime":"2025-11-26T00:08:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.528043 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:08:10 crc kubenswrapper[4875]: E1126 00:08:10.528159 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.577831 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.577883 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.577892 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.577907 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.577916 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:10Z","lastTransitionTime":"2025-11-26T00:08:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.683905 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.683982 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.683994 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.684009 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.684020 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:10Z","lastTransitionTime":"2025-11-26T00:08:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.786339 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.786470 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.786483 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.786500 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.786511 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:10Z","lastTransitionTime":"2025-11-26T00:08:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.888808 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.888850 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.888864 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.888879 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.888889 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:10Z","lastTransitionTime":"2025-11-26T00:08:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.992011 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.992041 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.992051 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.992066 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:10 crc kubenswrapper[4875]: I1126 00:08:10.992076 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:10Z","lastTransitionTime":"2025-11-26T00:08:10Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.094808 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.094875 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.094889 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.094908 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.094921 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:11Z","lastTransitionTime":"2025-11-26T00:08:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.196637 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.196690 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.196701 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.196716 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.196727 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:11Z","lastTransitionTime":"2025-11-26T00:08:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.298479 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.298528 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.298537 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.298552 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.298561 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:11Z","lastTransitionTime":"2025-11-26T00:08:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.400618 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.400675 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.400687 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.400708 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.400723 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:11Z","lastTransitionTime":"2025-11-26T00:08:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.503329 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.503378 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.503390 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.503436 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.503450 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:11Z","lastTransitionTime":"2025-11-26T00:08:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.529006 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.529044 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.529058 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:08:11 crc kubenswrapper[4875]: E1126 00:08:11.529130 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:08:11 crc kubenswrapper[4875]: E1126 00:08:11.529262 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:08:11 crc kubenswrapper[4875]: E1126 00:08:11.529720 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.529981 4875 scope.go:117] "RemoveContainer" containerID="93c48708c135563d79026fd18dbd330523d6cce108cf7f871fb6553ea12e7025" Nov 26 00:08:11 crc kubenswrapper[4875]: E1126 00:08:11.530231 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 20s restarting failed container=ovnkube-controller pod=ovnkube-node-hh8lx_openshift-ovn-kubernetes(8fe08e0b-f339-4beb-953e-6e0180c6c945)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.605499 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.605540 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.605552 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.605566 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.605577 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:11Z","lastTransitionTime":"2025-11-26T00:08:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.708136 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.708202 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.708281 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.708327 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.708354 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:11Z","lastTransitionTime":"2025-11-26T00:08:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.810971 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.811017 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.811031 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.811048 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.811061 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:11Z","lastTransitionTime":"2025-11-26T00:08:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.913169 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.913212 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.913220 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.913234 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:11 crc kubenswrapper[4875]: I1126 00:08:11.913242 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:11Z","lastTransitionTime":"2025-11-26T00:08:11Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.016857 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.016929 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.016942 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.016970 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.016986 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:12Z","lastTransitionTime":"2025-11-26T00:08:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.121111 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.121200 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.121220 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.121252 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.121276 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:12Z","lastTransitionTime":"2025-11-26T00:08:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.223964 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.224005 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.224015 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.224028 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.224038 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:12Z","lastTransitionTime":"2025-11-26T00:08:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.326693 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.326739 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.326749 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.326767 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.326779 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:12Z","lastTransitionTime":"2025-11-26T00:08:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.429302 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.429355 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.429371 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.429393 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.429427 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:12Z","lastTransitionTime":"2025-11-26T00:08:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.528777 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:08:12 crc kubenswrapper[4875]: E1126 00:08:12.528993 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.533357 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.533393 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.533406 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.533443 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.533458 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:12Z","lastTransitionTime":"2025-11-26T00:08:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.637406 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.637475 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.637486 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.637504 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.637515 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:12Z","lastTransitionTime":"2025-11-26T00:08:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.741002 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.741107 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.741149 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.741196 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.741226 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:12Z","lastTransitionTime":"2025-11-26T00:08:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.843930 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.843999 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.844015 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.844045 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.844063 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:12Z","lastTransitionTime":"2025-11-26T00:08:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.947201 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.947265 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.947284 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.947317 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:12 crc kubenswrapper[4875]: I1126 00:08:12.947339 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:12Z","lastTransitionTime":"2025-11-26T00:08:12Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.054349 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.054447 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.054463 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.054484 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.054497 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:13Z","lastTransitionTime":"2025-11-26T00:08:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.157113 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.157169 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.157188 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.157212 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.157229 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:13Z","lastTransitionTime":"2025-11-26T00:08:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.260219 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.260269 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.260281 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.260300 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.260312 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:13Z","lastTransitionTime":"2025-11-26T00:08:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.363191 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.363238 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.363248 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.363270 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.363280 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:13Z","lastTransitionTime":"2025-11-26T00:08:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.449020 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3b640f09-f96d-4197-97be-f50e211e484d-metrics-certs\") pod \"network-metrics-daemon-jhkp7\" (UID: \"3b640f09-f96d-4197-97be-f50e211e484d\") " pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:08:13 crc kubenswrapper[4875]: E1126 00:08:13.449268 4875 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 00:08:13 crc kubenswrapper[4875]: E1126 00:08:13.449372 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3b640f09-f96d-4197-97be-f50e211e484d-metrics-certs podName:3b640f09-f96d-4197-97be-f50e211e484d nodeName:}" failed. No retries permitted until 2025-11-26 00:08:45.449339555 +0000 UTC m=+98.639315280 (durationBeforeRetry 32s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3b640f09-f96d-4197-97be-f50e211e484d-metrics-certs") pod "network-metrics-daemon-jhkp7" (UID: "3b640f09-f96d-4197-97be-f50e211e484d") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.466289 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.466356 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.466374 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.466399 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.466442 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:13Z","lastTransitionTime":"2025-11-26T00:08:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.528220 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.528257 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.528230 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:08:13 crc kubenswrapper[4875]: E1126 00:08:13.528374 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:08:13 crc kubenswrapper[4875]: E1126 00:08:13.528650 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:08:13 crc kubenswrapper[4875]: E1126 00:08:13.528793 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.568962 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.569026 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.569048 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.569080 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.569101 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:13Z","lastTransitionTime":"2025-11-26T00:08:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.671775 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.671839 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.671864 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.671894 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.671921 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:13Z","lastTransitionTime":"2025-11-26T00:08:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.774487 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.774538 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.774554 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.774569 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.774580 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:13Z","lastTransitionTime":"2025-11-26T00:08:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.877203 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.877284 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.877311 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.877374 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.877437 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:13Z","lastTransitionTime":"2025-11-26T00:08:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.981558 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.981636 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.981655 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.981692 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:13 crc kubenswrapper[4875]: I1126 00:08:13.981712 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:13Z","lastTransitionTime":"2025-11-26T00:08:13Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.084861 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.084907 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.084917 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.084932 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.084941 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:14Z","lastTransitionTime":"2025-11-26T00:08:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.187674 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.187716 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.187727 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.187742 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.187755 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:14Z","lastTransitionTime":"2025-11-26T00:08:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.290861 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.290908 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.290921 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.290936 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.290949 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:14Z","lastTransitionTime":"2025-11-26T00:08:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.393795 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.393848 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.393858 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.393879 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.393892 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:14Z","lastTransitionTime":"2025-11-26T00:08:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.496094 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.496156 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.496169 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.496188 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.496199 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:14Z","lastTransitionTime":"2025-11-26T00:08:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.528462 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:08:14 crc kubenswrapper[4875]: E1126 00:08:14.528697 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.598840 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.598878 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.598890 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.598905 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.598917 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:14Z","lastTransitionTime":"2025-11-26T00:08:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.702284 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.702332 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.702347 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.702368 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.702381 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:14Z","lastTransitionTime":"2025-11-26T00:08:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.805792 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.805819 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.805828 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.805841 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.805850 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:14Z","lastTransitionTime":"2025-11-26T00:08:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.821477 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.821526 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.821539 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.821558 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.821571 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:14Z","lastTransitionTime":"2025-11-26T00:08:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:14 crc kubenswrapper[4875]: E1126 00:08:14.834783 4875 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e98cdd5c-4f13-4403-b492-aac02592afb8\\\",\\\"systemUUID\\\":\\\"9f6d4e71-289a-40d7-bad1-b955dbea75de\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:14Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.839503 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.839543 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.839551 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.839568 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.839579 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:14Z","lastTransitionTime":"2025-11-26T00:08:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:14 crc kubenswrapper[4875]: E1126 00:08:14.850944 4875 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e98cdd5c-4f13-4403-b492-aac02592afb8\\\",\\\"systemUUID\\\":\\\"9f6d4e71-289a-40d7-bad1-b955dbea75de\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:14Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.855449 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.855491 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.855501 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.855518 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.855529 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:14Z","lastTransitionTime":"2025-11-26T00:08:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:14 crc kubenswrapper[4875]: E1126 00:08:14.866993 4875 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e98cdd5c-4f13-4403-b492-aac02592afb8\\\",\\\"systemUUID\\\":\\\"9f6d4e71-289a-40d7-bad1-b955dbea75de\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:14Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.875111 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.875161 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.875174 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.875190 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.875201 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:14Z","lastTransitionTime":"2025-11-26T00:08:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:14 crc kubenswrapper[4875]: E1126 00:08:14.888945 4875 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e98cdd5c-4f13-4403-b492-aac02592afb8\\\",\\\"systemUUID\\\":\\\"9f6d4e71-289a-40d7-bad1-b955dbea75de\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:14Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.893947 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.894128 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.894284 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.894459 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.894588 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:14Z","lastTransitionTime":"2025-11-26T00:08:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:14 crc kubenswrapper[4875]: E1126 00:08:14.907605 4875 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:14Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:14Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:14Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:14Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e98cdd5c-4f13-4403-b492-aac02592afb8\\\",\\\"systemUUID\\\":\\\"9f6d4e71-289a-40d7-bad1-b955dbea75de\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:14Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:14 crc kubenswrapper[4875]: E1126 00:08:14.908035 4875 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.910005 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.910157 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.910297 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.910509 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:14 crc kubenswrapper[4875]: I1126 00:08:14.910664 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:14Z","lastTransitionTime":"2025-11-26T00:08:14Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.014163 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.014249 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.014273 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.014315 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.014343 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:15Z","lastTransitionTime":"2025-11-26T00:08:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.117767 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.117817 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.117829 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.117849 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.117863 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:15Z","lastTransitionTime":"2025-11-26T00:08:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.221500 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.221546 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.221557 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.221576 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.221602 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:15Z","lastTransitionTime":"2025-11-26T00:08:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.326019 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.326096 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.326109 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.326128 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.326142 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:15Z","lastTransitionTime":"2025-11-26T00:08:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.428868 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.428914 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.428926 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.428943 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.428956 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:15Z","lastTransitionTime":"2025-11-26T00:08:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.528984 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.529064 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.528984 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:08:15 crc kubenswrapper[4875]: E1126 00:08:15.529225 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:08:15 crc kubenswrapper[4875]: E1126 00:08:15.529329 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:08:15 crc kubenswrapper[4875]: E1126 00:08:15.529390 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.531445 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.531488 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.531504 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.531525 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.531540 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:15Z","lastTransitionTime":"2025-11-26T00:08:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.633817 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.633856 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.633865 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.633878 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.633889 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:15Z","lastTransitionTime":"2025-11-26T00:08:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.736339 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.736393 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.736406 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.736441 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.736454 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:15Z","lastTransitionTime":"2025-11-26T00:08:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.839491 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.839539 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.839549 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.839566 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.839579 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:15Z","lastTransitionTime":"2025-11-26T00:08:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.902432 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8wvqp_9fd65949-6a8a-4828-baa4-1c88c71d5ed2/kube-multus/0.log" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.902487 4875 generic.go:334] "Generic (PLEG): container finished" podID="9fd65949-6a8a-4828-baa4-1c88c71d5ed2" containerID="e107494e5b30ef73730bc20a4b4ae86c83caf1590f6a8044d1892e424e1559a8" exitCode=1 Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.902524 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-8wvqp" event={"ID":"9fd65949-6a8a-4828-baa4-1c88c71d5ed2","Type":"ContainerDied","Data":"e107494e5b30ef73730bc20a4b4ae86c83caf1590f6a8044d1892e424e1559a8"} Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.902978 4875 scope.go:117] "RemoveContainer" containerID="e107494e5b30ef73730bc20a4b4ae86c83caf1590f6a8044d1892e424e1559a8" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.920508 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jhkp7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b640f09-f96d-4197-97be-f50e211e484d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wmjfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wmjfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:41Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jhkp7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:15Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.942533 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.942611 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.942650 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.942919 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.942971 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:15Z","lastTransitionTime":"2025-11-26T00:08:15Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.943859 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6e20b72-5757-4560-ab3e-61ad3a451be8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b34767b95b984666c42ed51106c7d43766e36ce3f5f4e743d8a40451536f82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dc301396f467d1c7d0af549fa7bf7daf1542a2aa87bf3ece820ce8e21a933d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9641d3d437fd7125bbf21d140d2e6853aed997e53f3f6be8133ce94c0d4a4653\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath
\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5086422d5107d7bba0da4b6f9b3e75966b3ee5833d93e8f5baa87e59f99e4578\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:15Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.959750 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:15Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.971930 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:15Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:15 crc kubenswrapper[4875]: I1126 00:08:15.983509 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hjshz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e5a9914-f410-4b51-8e16-77e18f7b09fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a43439ddb6f096b37261ab045b6a3ed923eefecb9c3122b00a87517605d0aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mm8dm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hjshz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:15Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:15.999976 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8wvqp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:15Z\\\",\\\"message\\\":\\\"containers with unready status: [kube-multus]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e107494e5b30ef73730bc20a4b4ae86c83caf1590f6a8044d1892e424e1559a8\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e107494e5b30ef73730bc20a4b4ae86c83caf1590f6a8044d1892e424e1559a8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T00:08:15Z\\\",\\\"message\\\":\\\"2025-11-26T00:07:29+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_1493b82f-90d3-4212-862b-1aab5d292ac2\\\\n2025-11-26T00:07:30+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_1493b82f-90d3-4212-862b-1aab5d292ac2 to /host/opt/cni/bin/\\\\n2025-11-26T00:07:30Z [verbose] multus-daemon started\\\\n2025-11-26T00:07:30Z [verbose] Readiness Indicator file check\\\\n2025-11-26T00:08:15Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22mhs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8wvqp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:15Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.023799 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c771ff97-2ac6-43b4-976e-06fb4909c83b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a0c30f49af86d8affd5039728fe2adf22324b93d617dd5c63dfc73aed0ed70e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f839a108e04ab5f68757b2f5661fe0643d88bbd33493cc800d9c6826d0e69c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef02df42efda33d3f123cfe925bb0d1adab00fecbfd8711962c4bdf458898e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7650f30df5f9f908d3dc068e88e04892dce7a8
8f63ecc6a52901a36ba7bed5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aac38d8e30fa7e53d316dd4092f7210c7976bd44f77d2a21ab0ed53e1f6c69f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:16Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.038798 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0dde34d63f530305d33e43053c6475f1e1a5a794722700a85758a4d3d3ef5ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:16Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.045297 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.045330 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.045340 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.045359 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.045372 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:16Z","lastTransitionTime":"2025-11-26T00:08:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.053173 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5231f747090cf07f92f8f56d3c43191b581098758208e6901b28de04113ab0c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:16Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.066505 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a19f36bfb94e5e0000f6e930c5160e799d4a3e334d2b119dcc6879fac1a4a98e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9dfa9b8751b0d639d299539f76b3a57c46b730d3960851344162019cb4843f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:16Z is after 
2025-08-24T17:21:41Z" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.079580 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://546875ee0f2f4855eb47134061708606927a379fcc7e1c3492cffcc519bbb542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdf274332d9e071cabf1993c37531ebc66300c965dd736a4795252168efc371\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9mztb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed 
to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:16Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.098142 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02732a7d-257e-4d74-8a08-cea35074fc4e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab59be8e3f5b8bfceb2d6bdb642d03f2049be71e3acd91d7b5dd155e471f7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"
}]},{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:32Z\\\"}},\\
\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qvrsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:16Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:16 crc 
kubenswrapper[4875]: I1126 00:08:16.115391 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qhmvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"40b82738-34d2-4c2f-99c3-ae1198e964dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c52a486d839058df608b79c0f2e01f157347ddefbcefdda59cb44b073c1f776c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6mk2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b83a4af8fd03aab54351d9dc3f4b0f9a203aab768023dd1a416361d746473cdb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6mk2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:40Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qhmvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:16Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.131947 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tvsgw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be016325-bd90-4293-9d92-bc8cfba8a463\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fecdc3efe4f0230352d90992e44266359ca7129f3654b43186f6e81bbe72ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97pr5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tvsgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:16Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.148595 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c8a175-7cc7-4e45-8e37-d431994e5526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-opera
tor@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T00:07:26Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1126 00:07:11.241353 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 00:07:11.243535 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-775517230/tls.crt::/tmp/serving-cert-775517230/tls.key\\\\\\\"\\\\nI1126 00:07:26.715442 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 00:07:26.718387 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 00:07:26.718433 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 00:07:26.718455 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 00:07:26.718462 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 00:07:26.727473 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 00:07:26.727505 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727510 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727513 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 00:07:26.727516 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 00:07:26.727520 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 00:07:26.727524 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 00:07:26.727732 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 00:07:26.730962 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:16Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.148962 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.149005 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.149018 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.149034 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.149046 4875 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:16Z","lastTransitionTime":"2025-11-26T00:08:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.164398 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d127797-c509-4d81-950c-2e4bc7f82786\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ec22a9fda336e3f85bcbaf3c8fee2d1096b9f9eaede26d1246858bf393ad5b55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fffb96f456bc7cb372dceab2ad7310da04e2487d43530da595c2b865ec8e02a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11193f8290474fdb88f54086c6e42c01f4630df442b28819c4917165ef46a5cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controlle
r\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34b0e5244d5a03f2b673959e7a5d9ec63a0d3a77dc9915cc1e1e0d73cdcfabee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34b0e5244d5a03f2b673959e7a5d9ec63a0d3a77dc9915cc1e1e0d73cdcfabee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:16Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.179110 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:16Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.200918 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe08e0b-f339-4beb-953e-6e0180c6c945\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93c48708c135563d79026fd18dbd330523d6cce1
08cf7f871fb6553ea12e7025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93c48708c135563d79026fd18dbd330523d6cce108cf7f871fb6553ea12e7025\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T00:07:55Z\\\",\\\"message\\\":\\\"uster\\\\\\\", UUID:\\\\\\\"2a3fb1a3-a476-4e14-bcf5-fb79af60206a\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-service-ca-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.40\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1126 00:07:55.341064 6494 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 00:07:55.341125 6494 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:54Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-hh8lx_openshift-ovn-kubernetes(8fe08e0b-f339-4beb-953e-6e0180c6c945)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hh8lx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:16Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.251808 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.251845 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.251854 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.251872 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.251884 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:16Z","lastTransitionTime":"2025-11-26T00:08:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.354777 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.354816 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.354824 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.354842 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.354854 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:16Z","lastTransitionTime":"2025-11-26T00:08:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.458481 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.458525 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.458534 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.458556 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.458569 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:16Z","lastTransitionTime":"2025-11-26T00:08:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.528524 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:08:16 crc kubenswrapper[4875]: E1126 00:08:16.528794 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.562325 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.562371 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.562380 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.562395 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.562405 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:16Z","lastTransitionTime":"2025-11-26T00:08:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.664915 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.665177 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.665305 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.665397 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.665516 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:16Z","lastTransitionTime":"2025-11-26T00:08:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.767982 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.768041 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.768057 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.768079 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.768096 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:16Z","lastTransitionTime":"2025-11-26T00:08:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.870601 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.870660 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.870671 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.870693 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.870706 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:16Z","lastTransitionTime":"2025-11-26T00:08:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.910921 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8wvqp_9fd65949-6a8a-4828-baa4-1c88c71d5ed2/kube-multus/0.log" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.910994 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-8wvqp" event={"ID":"9fd65949-6a8a-4828-baa4-1c88c71d5ed2","Type":"ContainerStarted","Data":"8403934b57b62597733ce8e578eef53784b1e5a39eba7e4fb8765870a3690424"} Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.931464 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02732a7d-257e-4d74-8a08-cea35074fc4e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab59be8e3f5b8bfceb2d6bdb642d03f2049be71e3acd91d7b5dd155e471f7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-
binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7a
a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for 
pod \"openshift-multus\"/\"multus-additional-cni-plugins-qvrsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:16Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.947134 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qhmvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"40b82738-34d2-4c2f-99c3-ae1198e964dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c52a486d839058df608b79c0f2e01f157347ddefbcefdda59cb44b073c1f776c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6mk2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b83a4af8fd03aab54351d9dc3f4b0f9a203aab768023dd1a416361d746473cdb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6mk2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\
\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:40Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qhmvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:16Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.962477 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://546875ee0f2f4855eb47134061708606927a379fcc7e1c3492cffcc519bbb542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdf274332d9e071cabf1993c37531ebc66300c965dd736a4795252168efc371\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\
":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9mztb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:16Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.973505 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.973538 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.973550 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.973598 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.973615 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:16Z","lastTransitionTime":"2025-11-26T00:08:16Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.976241 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d127797-c509-4d81-950c-2e4bc7f82786\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ec22a9fda336e3f85bcbaf3c8fee2d1096b9f9eaede26d1246858bf393ad5b55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fffb96f456bc7cb372dceab2ad7310da04e2487d43530da595c2b865ec8e02a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11193f8290474fdb88f54086c6e42c01f4630df442b28819c4917165ef46a5cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"
cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34b0e5244d5a03f2b673959e7a5d9ec63a0d3a77dc9915cc1e1e0d73cdcfabee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34b0e5244d5a03f2b673959e7a5d9ec63a0d3a77dc9915cc1e1e0d73cdcfabee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:16Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:16 crc kubenswrapper[4875]: I1126 00:08:16.992627 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:16Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.017759 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe08e0b-f339-4beb-953e-6e0180c6c945\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93c48708c135563d79026fd18dbd330523d6cce1
08cf7f871fb6553ea12e7025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93c48708c135563d79026fd18dbd330523d6cce108cf7f871fb6553ea12e7025\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T00:07:55Z\\\",\\\"message\\\":\\\"uster\\\\\\\", UUID:\\\\\\\"2a3fb1a3-a476-4e14-bcf5-fb79af60206a\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-service-ca-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.40\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1126 00:07:55.341064 6494 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 00:07:55.341125 6494 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:54Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-hh8lx_openshift-ovn-kubernetes(8fe08e0b-f339-4beb-953e-6e0180c6c945)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hh8lx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:17Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.030780 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tvsgw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be016325-bd90-4293-9d92-bc8cfba8a463\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fecdc3efe4f0230352d90992e44266359ca7129f3654b43186f6e81bbe72ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97pr5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tvsgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:17Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.044855 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c8a175-7cc7-4e45-8e37-d431994e5526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-
apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T00:07:26Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1126 00:07:11.241353 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 00:07:11.243535 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-775517230/tls.crt::/tmp/serving-cert-775517230/tls.key\\\\\\\"\\\\nI1126 00:07:26.715442 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 00:07:26.718387 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 00:07:26.718433 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 00:07:26.718455 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 00:07:26.718462 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 00:07:26.727473 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 00:07:26.727505 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727510 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727513 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 00:07:26.727516 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 00:07:26.727520 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 00:07:26.727524 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 00:07:26.727732 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 00:07:26.730962 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:17Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.058840 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:17Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.071340 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:17Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.076591 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.076635 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.076645 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.076665 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.076679 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:17Z","lastTransitionTime":"2025-11-26T00:08:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.081263 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jhkp7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b640f09-f96d-4197-97be-f50e211e484d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wmjfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wmjfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:41Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jhkp7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:17Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.095904 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6e20b72-5757-4560-ab3e-61ad3a451be8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b34767b95b984666c42ed51106c7d43766e36ce3f5f4e743d8a40451536f82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dc301396f467d1c7d0af549fa7bf7daf1542a2aa87bf3ece820ce8e21a933d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9641d3d437fd7125bbf21d140d2e6853aed997e53f3f6be8133ce94c0d4a4653\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5086422d5107d7bba0da4b6f9b3e75966b3ee5833d93e8f5baa87e59f99e4578\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:17Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.112928 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0dde34d63f530305d33e43053c6475f1e1a5a794722700a85758a4d3d3ef5ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to 
verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:17Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.124710 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5231f747090cf07f92f8f56d3c43191b581098758208e6901b28de04113ab0c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:17Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.137005 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a19f36bfb94e5e0000f6e930c5160e799d4a3e334d2b119dcc6879fac1a4a98e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9dfa9b8751b0d639d299539f76b3a57c46b730d3960851344162019cb4843f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:17Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.146245 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hjshz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e5a9914-f410-4b51-8e16-77e18f7b09fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a43439ddb6f096b37261ab045b6a3ed923eefecb9c3122b00a87517605d0aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mm8dm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hjshz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:17Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.157957 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8wvqp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8403934b57b62597733ce8e578eef53784b1e5a39eba7e4fb8765870a3690424\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e107494e5b30ef73730bc20a4b4ae86c83caf1590f6a8044d1892e424e1559a8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T00:08:15Z\\\",\\\"message\\\":\\\"2025-11-26T00:07:29+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_1493b82f-90d3-4212-862b-1aab5d292ac2\\\\n2025-11-26T00:07:30+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_1493b82f-90d3-4212-862b-1aab5d292ac2 to /host/opt/cni/bin/\\\\n2025-11-26T00:07:30Z [verbose] multus-daemon started\\\\n2025-11-26T00:07:30Z [verbose] Readiness Indicator file check\\\\n2025-11-26T00:08:15Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:08:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22mhs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8wvqp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:17Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.178728 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.178782 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.178797 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.178816 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.178825 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:17Z","lastTransitionTime":"2025-11-26T00:08:17Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.178780 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c771ff97-2ac6-43b4-976e-06fb4909c83b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a0c30f49af86d8affd5039728fe2adf22324b93d617dd5c63dfc73aed0ed70e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f839a108e04ab5f68757b2f5661fe0643d88bbd33493cc800d9c6826d0e69c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef02df42efda33d3f123cfe925bb0d1adab00fecbfd8711962c4bdf458898e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\"
:true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7650f30df5f9f908d3dc068e88e04892dce7a88f63ecc6a52901a36ba7bed5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aac38d8e30fa7e53d316dd4092f7210c7976bd44f77d2a21ab0ed53e1f6c69f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":fal
se,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:17Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.280806 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.280845 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.280857 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.280873 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.280885 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:17Z","lastTransitionTime":"2025-11-26T00:08:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.383137 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.383174 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.383184 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.383201 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.383214 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:17Z","lastTransitionTime":"2025-11-26T00:08:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.485651 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.485694 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.485703 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.485720 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.485730 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:17Z","lastTransitionTime":"2025-11-26T00:08:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.528636 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.528674 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:08:17 crc kubenswrapper[4875]: E1126 00:08:17.528834 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.528862 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:08:17 crc kubenswrapper[4875]: E1126 00:08:17.528973 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:08:17 crc kubenswrapper[4875]: E1126 00:08:17.529124 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.540807 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://546875ee0f2f4855eb47134061708606927a379fcc7e1c3492cffcc519bbb542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdf274332d9e071cabf1993c37531ebc66300c965dd736a4795252168efc371\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8
e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9mztb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:17Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.554471 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02732a7d-257e-4d74-8a08-cea35074fc4e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab59be8e3f5b8bfceb2d6bdb642d03f2049be71e3acd91d7b5dd155e471f7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\
\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"re
adOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\
",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qvrsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:17Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.565926 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qhmvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"40b82738-34d2-4c2f-99c3-ae1198e964dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c52a486d839058df608b79c0f2e01f157347ddefbcefdda59cb44b073c1f776c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6mk2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b83a4af8fd03aab54351d9dc3f4b0f9a203aab768023dd1a416361d746473cdb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastSt
ate\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6mk2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:40Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qhmvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:17Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.579336 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c8a175-7cc7-4e45-8e37-d431994e5526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc
478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T00:07:26Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1126 00:07:11.241353 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 00:07:11.243535 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-775517230/tls.crt::/tmp/serving-cert-775517230/tls.key\\\\\\\"\\\\nI1126 00:07:26.715442 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 00:07:26.718387 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 00:07:26.718433 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 00:07:26.718455 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 00:07:26.718462 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 00:07:26.727473 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 00:07:26.727505 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727510 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727513 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 00:07:26.727516 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' 
detected.\\\\nW1126 00:07:26.727520 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 00:07:26.727524 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 00:07:26.727732 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 00:07:26.730962 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:17Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.588386 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d127797-c509-4d81-950c-2e4bc7f82786\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ec22a9fda336e3f85bcbaf3c8fee2d1096b9f9eaede26d1246858bf393ad5b55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fffb96f456bc7cb372dceab2ad7310da04e2487d43530da595c2b865ec8e02a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11193f8290474fdb88f54086c6e42c01f4630df442b28819c4917165ef46a5cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34b0e5244d5a03f2b673959e7a5d9ec63a0d3a77dc9915cc1e1e0d73cdcfabee\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34b0e5244d5a03f2b673959e7a5d9ec63a0d3a77dc9915cc1e1e0d73cdcfabee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:17Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.588606 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.588629 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.588638 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.588652 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.588662 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:17Z","lastTransitionTime":"2025-11-26T00:08:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.598429 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:17Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.624916 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe08e0b-f339-4beb-953e-6e0180c6c945\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://93c48708c135563d79026fd18dbd330523d6cce108cf7f871fb6553ea12e7025\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93c48708c135563d79026fd18dbd330523d6cce108cf7f871fb6553ea12e7025\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T00:07:55Z\\\",\\\"message\\\":\\\"uster\\\\\\\", UUID:\\\\\\\"2a3fb1a3-a476-4e14-bcf5-fb79af60206a\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-service-ca-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.40\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1126 00:07:55.341064 6494 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 00:07:55.341125 6494 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:54Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":2,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 20s restarting failed container=ovnkube-controller 
pod=ovnkube-node-hh8lx_openshift-ovn-kubernetes(8fe08e0b-f339-4beb-953e-6e0180c6c945)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hh8lx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:17Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.637679 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tvsgw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be016325-bd90-4293-9d92-bc8cfba8a463\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fecdc3efe4f0230352d90992e44266359ca7129f3654b43186f6e81bbe72ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97pr5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tvsgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:17Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.648198 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6e20b72-5757-4560-ab3e-61ad3a451be8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b34767b95b984666c42ed51106c7d43766e36ce3f5f4e743d8a40451536f82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dc301396f467d1c7d0af549fa7bf7daf1542a2aa87bf3ece820ce8e21a933d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9641d3d437fd7125bbf21d140d2e6853aed997e53f3f6be8133ce94c0d4a4653\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-
manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5086422d5107d7bba0da4b6f9b3e75966b3ee5833d93e8f5baa87e59f99e4578\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:17Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.661158 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:17Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.672652 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:17Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.683726 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jhkp7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b640f09-f96d-4197-97be-f50e211e484d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wmjfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wmjfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:41Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jhkp7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:17Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.690645 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.690676 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.690685 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.690698 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.690708 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:17Z","lastTransitionTime":"2025-11-26T00:08:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.706126 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c771ff97-2ac6-43b4-976e-06fb4909c83b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a0c30f49af86d8affd5039728fe2adf22324b93d617dd5c63dfc73aed0ed70e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f839a108e04ab5f68757b2f5661fe0643d88bbd33493cc800d9c6826d0e69c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef02df42efda33d3f123cfe925bb0d1adab00fecbfd8711962c4bdf458898e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7650f30df5f9f908d3dc068e88e04892dce7a88f63ecc6a52901a36ba7bed5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aac38d8e30fa7e53d316dd4092f7210c7976bd44f77d2a21ab0ed53e1f6c69f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:17Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.721773 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0dde34d63f530305d33e43053c6475f1e1a5a794722700a85758a4d3d3ef5ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:17Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.733567 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5231f747090cf07f92f8f56d3c43191b581098758208e6901b28de04113ab0c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:17Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.745913 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a19f36bfb94e5e0000f6e930c5160e799d4a3e334d2b119dcc6879fac1a4a98e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9dfa9b8751b0d639d299539f76b3a57c46b730d3960851344162019cb4843f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:17Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.753919 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hjshz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e5a9914-f410-4b51-8e16-77e18f7b09fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a43439ddb6f096b37261ab045b6a3ed923eefecb9c3122b00a87517605d0aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mm8dm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hjshz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:17Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.768533 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8wvqp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8403934b57b62597733ce8e578eef53784b1e5a39eba7e4fb8765870a3690424\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e107494e5b30ef73730bc20a4b4ae86c83caf1590f6a8044d1892e424e1559a8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T00:08:15Z\\\",\\\"message\\\":\\\"2025-11-26T00:07:29+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_1493b82f-90d3-4212-862b-1aab5d292ac2\\\\n2025-11-26T00:07:30+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_1493b82f-90d3-4212-862b-1aab5d292ac2 to /host/opt/cni/bin/\\\\n2025-11-26T00:07:30Z [verbose] multus-daemon started\\\\n2025-11-26T00:07:30Z [verbose] Readiness Indicator file check\\\\n2025-11-26T00:08:15Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:08:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22mhs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8wvqp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:17Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.792946 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.792972 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.792980 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.792991 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.793000 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:17Z","lastTransitionTime":"2025-11-26T00:08:17Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.895278 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.895335 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.895351 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.895374 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.895391 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:17Z","lastTransitionTime":"2025-11-26T00:08:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.998213 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.998252 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.998262 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.998279 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:17 crc kubenswrapper[4875]: I1126 00:08:17.998291 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:17Z","lastTransitionTime":"2025-11-26T00:08:17Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.100928 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.100985 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.100998 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.101013 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.101025 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:18Z","lastTransitionTime":"2025-11-26T00:08:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.203115 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.203866 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.203963 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.204039 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.204116 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:18Z","lastTransitionTime":"2025-11-26T00:08:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.306061 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.306107 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.306124 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.306148 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.306163 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:18Z","lastTransitionTime":"2025-11-26T00:08:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.408152 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.408435 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.408558 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.408639 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.408708 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:18Z","lastTransitionTime":"2025-11-26T00:08:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.512583 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.512650 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.512670 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.512697 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.512720 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:18Z","lastTransitionTime":"2025-11-26T00:08:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.528242 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:08:18 crc kubenswrapper[4875]: E1126 00:08:18.528385 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.616083 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.616142 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.616159 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.616184 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.616205 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:18Z","lastTransitionTime":"2025-11-26T00:08:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.718676 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.718768 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.718802 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.718840 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.718867 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:18Z","lastTransitionTime":"2025-11-26T00:08:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.824392 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.824463 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.824471 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.824485 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.824493 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:18Z","lastTransitionTime":"2025-11-26T00:08:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.926864 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.926913 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.926925 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.926942 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:18 crc kubenswrapper[4875]: I1126 00:08:18.926954 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:18Z","lastTransitionTime":"2025-11-26T00:08:18Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.029524 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.029562 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.029573 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.029588 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.029597 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:19Z","lastTransitionTime":"2025-11-26T00:08:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.132333 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.132391 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.132403 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.132438 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.132448 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:19Z","lastTransitionTime":"2025-11-26T00:08:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.235298 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.235338 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.235347 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.235360 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.235369 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:19Z","lastTransitionTime":"2025-11-26T00:08:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.337839 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.337898 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.337913 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.337941 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.337958 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:19Z","lastTransitionTime":"2025-11-26T00:08:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.440154 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.440204 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.440216 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.440231 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.440242 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:19Z","lastTransitionTime":"2025-11-26T00:08:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.528795 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.528836 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.528817 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:08:19 crc kubenswrapper[4875]: E1126 00:08:19.528951 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:08:19 crc kubenswrapper[4875]: E1126 00:08:19.529019 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:08:19 crc kubenswrapper[4875]: E1126 00:08:19.529238 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.542556 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.542604 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.542616 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.542637 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.542677 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:19Z","lastTransitionTime":"2025-11-26T00:08:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.645985 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.646074 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.646097 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.646265 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.646303 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:19Z","lastTransitionTime":"2025-11-26T00:08:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.749485 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.749561 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.749575 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.749601 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.749641 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:19Z","lastTransitionTime":"2025-11-26T00:08:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.852730 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.852808 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.852827 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.852858 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.852879 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:19Z","lastTransitionTime":"2025-11-26T00:08:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.955588 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.955954 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.955967 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.955984 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:19 crc kubenswrapper[4875]: I1126 00:08:19.955994 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:19Z","lastTransitionTime":"2025-11-26T00:08:19Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.058551 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.058586 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.058594 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.058608 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.058616 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:20Z","lastTransitionTime":"2025-11-26T00:08:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.161134 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.161189 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.161205 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.161226 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.161243 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:20Z","lastTransitionTime":"2025-11-26T00:08:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.263129 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.263171 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.263179 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.263192 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.263201 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:20Z","lastTransitionTime":"2025-11-26T00:08:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.365143 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.365173 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.365184 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.365318 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.365350 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:20Z","lastTransitionTime":"2025-11-26T00:08:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.467505 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.467544 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.467553 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.467566 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.467604 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:20Z","lastTransitionTime":"2025-11-26T00:08:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.527983 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:08:20 crc kubenswrapper[4875]: E1126 00:08:20.528106 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.569701 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.569743 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.569751 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.569764 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.569773 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:20Z","lastTransitionTime":"2025-11-26T00:08:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.671735 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.671773 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.671784 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.671800 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.671812 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:20Z","lastTransitionTime":"2025-11-26T00:08:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.774306 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.774338 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.774347 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.774359 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.774369 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:20Z","lastTransitionTime":"2025-11-26T00:08:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.877458 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.877499 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.877507 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.877520 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.877528 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:20Z","lastTransitionTime":"2025-11-26T00:08:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.979982 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.980386 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.980481 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.980517 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:20 crc kubenswrapper[4875]: I1126 00:08:20.980543 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:20Z","lastTransitionTime":"2025-11-26T00:08:20Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.083606 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.083647 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.083656 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.083669 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.083678 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:21Z","lastTransitionTime":"2025-11-26T00:08:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.186947 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.186989 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.187004 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.187021 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.187032 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:21Z","lastTransitionTime":"2025-11-26T00:08:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.289089 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.289132 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.289148 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.289164 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.289177 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:21Z","lastTransitionTime":"2025-11-26T00:08:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.392992 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.393066 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.393081 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.393104 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.393123 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:21Z","lastTransitionTime":"2025-11-26T00:08:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.495626 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.495690 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.495707 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.495725 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.495738 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:21Z","lastTransitionTime":"2025-11-26T00:08:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.528337 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.528396 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:08:21 crc kubenswrapper[4875]: E1126 00:08:21.528476 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.528337 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:08:21 crc kubenswrapper[4875]: E1126 00:08:21.528576 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:08:21 crc kubenswrapper[4875]: E1126 00:08:21.528748 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.597309 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.597348 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.597356 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.597389 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.597442 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:21Z","lastTransitionTime":"2025-11-26T00:08:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.699625 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.700533 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.700559 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.700588 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.700613 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:21Z","lastTransitionTime":"2025-11-26T00:08:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.803707 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.803769 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.803791 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.803813 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.803829 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:21Z","lastTransitionTime":"2025-11-26T00:08:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.905534 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.905575 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.905585 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.905599 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:21 crc kubenswrapper[4875]: I1126 00:08:21.905608 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:21Z","lastTransitionTime":"2025-11-26T00:08:21Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.007954 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.008044 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.008078 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.008109 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.008132 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:22Z","lastTransitionTime":"2025-11-26T00:08:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.111668 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.111727 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.111749 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.111778 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.111802 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:22Z","lastTransitionTime":"2025-11-26T00:08:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.214043 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.214095 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.214105 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.214118 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.214128 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:22Z","lastTransitionTime":"2025-11-26T00:08:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.316675 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.316734 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.316748 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.316768 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.316782 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:22Z","lastTransitionTime":"2025-11-26T00:08:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.419046 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.419106 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.419118 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.419133 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.419145 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:22Z","lastTransitionTime":"2025-11-26T00:08:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.523324 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.523354 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.523363 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.523376 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.523454 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:22Z","lastTransitionTime":"2025-11-26T00:08:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.528773 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:08:22 crc kubenswrapper[4875]: E1126 00:08:22.528869 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.626113 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.626173 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.626183 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.626197 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.626205 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:22Z","lastTransitionTime":"2025-11-26T00:08:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.730050 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.730096 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.730106 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.730121 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.730130 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:22Z","lastTransitionTime":"2025-11-26T00:08:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.833117 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.833208 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.833223 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.833237 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.833257 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:22Z","lastTransitionTime":"2025-11-26T00:08:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.935075 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.935101 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.935109 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.935120 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:22 crc kubenswrapper[4875]: I1126 00:08:22.935128 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:22Z","lastTransitionTime":"2025-11-26T00:08:22Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.037256 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.037315 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.037326 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.037342 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.037353 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:23Z","lastTransitionTime":"2025-11-26T00:08:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.139278 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.139314 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.139323 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.139335 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.139344 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:23Z","lastTransitionTime":"2025-11-26T00:08:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.241890 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.241926 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.241935 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.241948 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.241957 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:23Z","lastTransitionTime":"2025-11-26T00:08:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.344842 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.344903 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.344922 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.344951 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.344974 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:23Z","lastTransitionTime":"2025-11-26T00:08:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.447848 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.447905 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.447922 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.447946 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.447965 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:23Z","lastTransitionTime":"2025-11-26T00:08:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.528267 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.528311 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.528310 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:08:23 crc kubenswrapper[4875]: E1126 00:08:23.528388 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:08:23 crc kubenswrapper[4875]: E1126 00:08:23.528578 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:08:23 crc kubenswrapper[4875]: E1126 00:08:23.528614 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.550195 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.550231 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.550242 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.550262 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.550275 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:23Z","lastTransitionTime":"2025-11-26T00:08:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.653448 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.653526 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.653545 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.653592 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.653620 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:23Z","lastTransitionTime":"2025-11-26T00:08:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.756665 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.756712 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.756724 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.756746 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.756762 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:23Z","lastTransitionTime":"2025-11-26T00:08:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.859533 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.859592 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.859605 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.859624 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.859635 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:23Z","lastTransitionTime":"2025-11-26T00:08:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.962701 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.962766 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.962784 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.962808 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:23 crc kubenswrapper[4875]: I1126 00:08:23.962826 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:23Z","lastTransitionTime":"2025-11-26T00:08:23Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.065959 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.065995 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.066006 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.066023 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.066033 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:24Z","lastTransitionTime":"2025-11-26T00:08:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.168752 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.168796 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.168808 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.168828 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.168843 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:24Z","lastTransitionTime":"2025-11-26T00:08:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.271597 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.271659 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.271682 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.271714 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.271738 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:24Z","lastTransitionTime":"2025-11-26T00:08:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.375017 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.375090 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.375109 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.375135 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.375153 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:24Z","lastTransitionTime":"2025-11-26T00:08:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.478245 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.478316 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.478341 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.478370 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.478390 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:24Z","lastTransitionTime":"2025-11-26T00:08:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.528969 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:08:24 crc kubenswrapper[4875]: E1126 00:08:24.529219 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.581524 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.581572 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.581585 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.581604 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.581616 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:24Z","lastTransitionTime":"2025-11-26T00:08:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.683589 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.683635 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.683647 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.683690 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.683704 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:24Z","lastTransitionTime":"2025-11-26T00:08:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.787264 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.787307 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.787315 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.787333 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.787343 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:24Z","lastTransitionTime":"2025-11-26T00:08:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.890093 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.890153 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.890173 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.890192 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.890205 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:24Z","lastTransitionTime":"2025-11-26T00:08:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.992072 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.992115 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.992129 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.992150 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:24 crc kubenswrapper[4875]: I1126 00:08:24.992166 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:24Z","lastTransitionTime":"2025-11-26T00:08:24Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.094491 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.094539 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.094554 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.094575 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.094589 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:25Z","lastTransitionTime":"2025-11-26T00:08:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.140408 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.140466 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.140480 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.140498 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.140510 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:25Z","lastTransitionTime":"2025-11-26T00:08:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:25 crc kubenswrapper[4875]: E1126 00:08:25.154570 4875 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e98cdd5c-4f13-4403-b492-aac02592afb8\\\",\\\"systemUUID\\\":\\\"9f6d4e71-289a-40d7-bad1-b955dbea75de\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:25Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.157989 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.158033 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.158045 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.158064 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.158075 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:25Z","lastTransitionTime":"2025-11-26T00:08:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:25 crc kubenswrapper[4875]: E1126 00:08:25.169576 4875 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e98cdd5c-4f13-4403-b492-aac02592afb8\\\",\\\"systemUUID\\\":\\\"9f6d4e71-289a-40d7-bad1-b955dbea75de\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:25Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.173172 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.173207 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.173219 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.173235 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.173246 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:25Z","lastTransitionTime":"2025-11-26T00:08:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:25 crc kubenswrapper[4875]: E1126 00:08:25.186845 4875 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e98cdd5c-4f13-4403-b492-aac02592afb8\\\",\\\"systemUUID\\\":\\\"9f6d4e71-289a-40d7-bad1-b955dbea75de\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:25Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.190316 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.190352 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.190363 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.190378 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.190389 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:25Z","lastTransitionTime":"2025-11-26T00:08:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:25 crc kubenswrapper[4875]: E1126 00:08:25.204682 4875 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e98cdd5c-4f13-4403-b492-aac02592afb8\\\",\\\"systemUUID\\\":\\\"9f6d4e71-289a-40d7-bad1-b955dbea75de\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:25Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.207714 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.207789 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.207806 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.207819 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.207830 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:25Z","lastTransitionTime":"2025-11-26T00:08:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:25 crc kubenswrapper[4875]: E1126 00:08:25.221344 4875 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:25Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:25Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:25Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:25Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e98cdd5c-4f13-4403-b492-aac02592afb8\\\",\\\"systemUUID\\\":\\\"9f6d4e71-289a-40d7-bad1-b955dbea75de\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:25Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:25 crc kubenswrapper[4875]: E1126 00:08:25.221521 4875 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.223218 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.223252 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.223262 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.223275 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.223284 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:25Z","lastTransitionTime":"2025-11-26T00:08:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.326936 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.326986 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.327000 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.327021 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.327036 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:25Z","lastTransitionTime":"2025-11-26T00:08:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.429028 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.429082 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.429095 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.429111 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.429459 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:25Z","lastTransitionTime":"2025-11-26T00:08:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.529272 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:08:25 crc kubenswrapper[4875]: E1126 00:08:25.529610 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.529795 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:08:25 crc kubenswrapper[4875]: E1126 00:08:25.529959 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.530577 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:08:25 crc kubenswrapper[4875]: E1126 00:08:25.530780 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.530912 4875 scope.go:117] "RemoveContainer" containerID="93c48708c135563d79026fd18dbd330523d6cce108cf7f871fb6553ea12e7025" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.531539 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.531568 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.531581 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.531596 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.531607 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:25Z","lastTransitionTime":"2025-11-26T00:08:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.633990 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.634028 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.634036 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.634048 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.634056 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:25Z","lastTransitionTime":"2025-11-26T00:08:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.738248 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.738298 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.738309 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.738328 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.738341 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:25Z","lastTransitionTime":"2025-11-26T00:08:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.841172 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.841219 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.841229 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.841248 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.841260 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:25Z","lastTransitionTime":"2025-11-26T00:08:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.943490 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hh8lx_8fe08e0b-f339-4beb-953e-6e0180c6c945/ovnkube-controller/2.log" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.944261 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.944292 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.944303 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.944320 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.944331 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:25Z","lastTransitionTime":"2025-11-26T00:08:25Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.947055 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" event={"ID":"8fe08e0b-f339-4beb-953e-6e0180c6c945","Type":"ContainerStarted","Data":"2e1e37ff34e8016d35de4328475cea8cef96f21dc241d6ab0dfa8c068c38059d"} Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.947768 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.967957 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6e20b72-5757-4560-ab3e-61ad3a451be8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b34767b95b984666c42ed51106c7d43766e36ce3f5f4e743d8a40451536f82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dc301396f467d1c7d0af549fa7bf7daf1542a2aa87bf3ece820ce8e21a933d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9641d3d437fd7125bbf21d140d2e6853aed997e53f3f6be8133ce94c0d4a4653\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5086422d5107d7bba0da4b6f9b3e75966b3ee5833d93e8f5baa87e59f99e4578\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:25Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:25 crc kubenswrapper[4875]: I1126 00:08:25.990197 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:25Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.002594 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:26Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.027235 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jhkp7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b640f09-f96d-4197-97be-f50e211e484d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wmjfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wmjfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:41Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jhkp7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:26Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.046802 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.046863 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.046878 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.046903 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.046920 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:26Z","lastTransitionTime":"2025-11-26T00:08:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.051469 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c771ff97-2ac6-43b4-976e-06fb4909c83b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a0c30f49af86d8affd5039728fe2adf22324b93d617dd5c63dfc73aed0ed70e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f839a108e04ab5f68757b2f5661fe0643d88bbd33493cc800d9c6826d0e69c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef02df42efda33d3f123cfe925bb0d1adab00fecbfd8711962c4bdf458898e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7650f30df5f9f908d3dc068e88e04892dce7a88f63ecc6a52901a36ba7bed5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aac38d8e30fa7e53d316dd4092f7210c7976bd44f77d2a21ab0ed53e1f6c69f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:26Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.064222 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0dde34d63f530305d33e43053c6475f1e1a5a794722700a85758a4d3d3ef5ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:26Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.077311 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5231f747090cf07f92f8f56d3c43191b581098758208e6901b28de04113ab0c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:26Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.092660 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a19f36bfb94e5e0000f6e930c5160e799d4a3e334d2b119dcc6879fac1a4a98e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9dfa9b8751b0d639d299539f76b3a57c46b730d3960851344162019cb4843f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:26Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.104554 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hjshz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e5a9914-f410-4b51-8e16-77e18f7b09fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a43439ddb6f096b37261ab045b6a3ed923eefecb9c3122b00a87517605d0aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mm8dm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hjshz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:26Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.120634 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8wvqp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8403934b57b62597733ce8e578eef53784b1e5a39eba7e4fb8765870a3690424\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e107494e5b30ef73730bc20a4b4ae86c83caf1590f6a8044d1892e424e1559a8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T00:08:15Z\\\",\\\"message\\\":\\\"2025-11-26T00:07:29+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_1493b82f-90d3-4212-862b-1aab5d292ac2\\\\n2025-11-26T00:07:30+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_1493b82f-90d3-4212-862b-1aab5d292ac2 to /host/opt/cni/bin/\\\\n2025-11-26T00:07:30Z [verbose] multus-daemon started\\\\n2025-11-26T00:07:30Z [verbose] Readiness Indicator file check\\\\n2025-11-26T00:08:15Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:08:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22mhs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8wvqp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:26Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.132694 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://546875ee0f2f4855eb47134061708606927a379fcc7e1c3492cffcc519bbb542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdf274332d9e071cabf1993c37531ebc66300c965dd736a4795252168efc371\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9mztb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:26Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.145530 4875 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02732a7d-257e-4d74-8a08-cea35074fc4e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab59be8e3f5b8bfceb2d6bdb642d03f2049be71e3acd91d7b5dd155e471f7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qvrsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:26Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.149744 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.149801 4875 kubelet_node_status.go:724] 
"Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.149813 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.149833 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.149846 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:26Z","lastTransitionTime":"2025-11-26T00:08:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.158487 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qhmvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"40b82738-34d2-4c2f-99c3-ae1198e964dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c52a486d839058df608b79c0f2e01f157347ddefbcefdda59cb44b073c1f776c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6mk2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b83a4af8fd03aab54351d9dc3f4b0f9a203aab768023dd1a416361d746473cdb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\
\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6mk2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:40Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qhmvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:26Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.174833 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c8a175-7cc7-4e45-8e37-d431994e5526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ku
be-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T00:07:26Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1126 00:07:11.241353 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 00:07:11.243535 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-775517230/tls.crt::/tmp/serving-cert-775517230/tls.key\\\\\\\"\\\\nI1126 00:07:26.715442 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 00:07:26.718387 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 00:07:26.718433 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 00:07:26.718455 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 00:07:26.718462 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 00:07:26.727473 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 00:07:26.727505 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727510 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727513 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 00:07:26.727516 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 00:07:26.727520 1 secure_serving.go:69] Use of insecure cipher 
'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 00:07:26.727524 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 00:07:26.727732 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 00:07:26.730962 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:26Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.190093 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d127797-c509-4d81-950c-2e4bc7f82786\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ec22a9fda336e3f85bcbaf3c8fee2d1096b9f9eaede26d1246858bf393ad5b55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fffb96f456bc7cb372dceab2ad7310da04e2487d43530da595c2b865ec8e02a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11193f8290474fdb88f54086c6e42c01f4630df442b28819c4917165ef46a5cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34b0e5244d5a03f2b673959e7a5d9ec63a0d3a77dc9915cc1e1e0d73cdcfabee\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34b0e5244d5a03f2b673959e7a5d9ec63a0d3a77dc9915cc1e1e0d73cdcfabee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:26Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.209128 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:26Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.231800 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe08e0b-f339-4beb-953e-6e0180c6c945\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e1e37ff34e8016d35de4328475cea8cef96f21d
c241d6ab0dfa8c068c38059d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93c48708c135563d79026fd18dbd330523d6cce108cf7f871fb6553ea12e7025\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T00:07:55Z\\\",\\\"message\\\":\\\"uster\\\\\\\", UUID:\\\\\\\"2a3fb1a3-a476-4e14-bcf5-fb79af60206a\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-service-ca-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.40\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1126 00:07:55.341064 6494 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 00:07:55.341125 6494 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to 
create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:54Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:08:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\
"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hh8lx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:26Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.245976 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tvsgw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"be016325-bd90-4293-9d92-bc8cfba8a463\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fecdc3efe4f0230352d90992e44266359ca7129f3654b43186f6e81bbe72ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97pr5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tvsgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:26Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.253557 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.253604 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.253614 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.253634 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.253645 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:26Z","lastTransitionTime":"2025-11-26T00:08:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: 
NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.355987 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.356022 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.356030 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.356042 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.356051 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:26Z","lastTransitionTime":"2025-11-26T00:08:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.458811 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.458854 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.458866 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.458884 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.458897 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:26Z","lastTransitionTime":"2025-11-26T00:08:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.528656 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:08:26 crc kubenswrapper[4875]: E1126 00:08:26.528773 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.561336 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.561382 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.561399 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.561449 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.561466 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:26Z","lastTransitionTime":"2025-11-26T00:08:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.664357 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.664401 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.664430 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.664447 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.664458 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:26Z","lastTransitionTime":"2025-11-26T00:08:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.767139 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.767181 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.767193 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.767210 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.767223 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:26Z","lastTransitionTime":"2025-11-26T00:08:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.869218 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.869263 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.869274 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.869292 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.869305 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:26Z","lastTransitionTime":"2025-11-26T00:08:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.953328 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hh8lx_8fe08e0b-f339-4beb-953e-6e0180c6c945/ovnkube-controller/3.log" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.954596 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hh8lx_8fe08e0b-f339-4beb-953e-6e0180c6c945/ovnkube-controller/2.log" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.957790 4875 generic.go:334] "Generic (PLEG): container finished" podID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerID="2e1e37ff34e8016d35de4328475cea8cef96f21dc241d6ab0dfa8c068c38059d" exitCode=1 Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.957887 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" event={"ID":"8fe08e0b-f339-4beb-953e-6e0180c6c945","Type":"ContainerDied","Data":"2e1e37ff34e8016d35de4328475cea8cef96f21dc241d6ab0dfa8c068c38059d"} Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.958034 4875 scope.go:117] "RemoveContainer" containerID="93c48708c135563d79026fd18dbd330523d6cce108cf7f871fb6553ea12e7025" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.959619 4875 scope.go:117] "RemoveContainer" containerID="2e1e37ff34e8016d35de4328475cea8cef96f21dc241d6ab0dfa8c068c38059d" Nov 26 00:08:26 crc kubenswrapper[4875]: E1126 00:08:26.960266 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-hh8lx_openshift-ovn-kubernetes(8fe08e0b-f339-4beb-953e-6e0180c6c945)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.974202 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.974240 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.974250 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.974310 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.974323 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:26Z","lastTransitionTime":"2025-11-26T00:08:26Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.985988 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c8a175-7cc7-4e45-8e37-d431994e5526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/ku
bernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T00:07:26Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1126 00:07:11.241353 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 00:07:11.243535 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-775517230/tls.crt::/tmp/serving-cert-775517230/tls.key\\\\\\\"\\\\nI1126 00:07:26.715442 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 00:07:26.718387 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 00:07:26.718433 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 00:07:26.718455 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 00:07:26.718462 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 00:07:26.727473 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 00:07:26.727505 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727510 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727513 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 00:07:26.727516 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 00:07:26.727520 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 00:07:26.727524 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 00:07:26.727732 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 00:07:26.730962 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:26Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:26 crc kubenswrapper[4875]: I1126 00:08:26.997242 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d127797-c509-4d81-950c-2e4bc7f82786\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ec22a9fda336e3f85bcbaf3c8fee2d1096b9f9eaede26d1246858bf393ad5b55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fffb96f456bc7cb372dceab2ad7310da04e2487d43530da595c2b865ec8e02a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11193f8290474fdb88f54086c6e42c01f4630df442b28819c4917165ef46a5cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34b0e5244d5a03f2b673959e7a5d9ec63a0d3a77dc9915cc1e1e0d73cdcfabee\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34b0e5244d5a03f2b673959e7a5d9ec63a0d3a77dc9915cc1e1e0d73cdcfabee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:26Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.016426 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:27Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.040037 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe08e0b-f339-4beb-953e-6e0180c6c945\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e1e37ff34e8016d35de4328475cea8cef96f21d
c241d6ab0dfa8c068c38059d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93c48708c135563d79026fd18dbd330523d6cce108cf7f871fb6553ea12e7025\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T00:07:55Z\\\",\\\"message\\\":\\\"uster\\\\\\\", UUID:\\\\\\\"2a3fb1a3-a476-4e14-bcf5-fb79af60206a\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-service-ca-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.40\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1126 00:07:55.341064 6494 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 00:07:55.341125 6494 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:54Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e1e37ff34e8016d35de4328475cea8cef96f21dc241d6ab0dfa8c068c38059d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T00:08:26Z\\\",\\\"message\\\":\\\"ssIP event handler 8\\\\nI1126 00:08:26.489054 6866 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 00:08:26.489070 6866 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 00:08:26.489032 6866 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 00:08:26.489082 6866 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 00:08:26.489101 6866 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 00:08:26.489148 6866 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1126 00:08:26.489202 6866 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1126 00:08:26.489201 6866 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 00:08:26.489235 6866 factory.go:656] Stopping watch factory\\\\nI1126 00:08:26.489259 6866 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1126 00:08:26.489294 6866 handler.go:208] Removed 
*v1.NetworkPolicy event handler 4\\\\nI1126 00:08:26.489847 6866 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1126 00:08:26.490022 6866 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1126 00:08:26.490104 6866 ovnkube.go:599] Stopped ovnkube\\\\nI1126 00:08:26.490210 6866 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 00:08:26.490449 6866 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:08:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/servi
ceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hh8lx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:27Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.054669 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tvsgw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"be016325-bd90-4293-9d92-bc8cfba8a463\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fecdc3efe4f0230352d90992e44266359ca7129f3654b43186f6e81bbe72ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97pr5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tvsgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:27Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.070101 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6e20b72-5757-4560-ab3e-61ad3a451be8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b34767b95b984666c42ed51106c7d43766e36ce3f5f4e743d8a40451536f82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dc301396f467d1c7d0af549fa7bf7daf1542a2aa87bf3ece820ce8e21a933d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9641d3d437fd7125bbf21d140d2e6853aed997e53f3f6be8133ce94c0d4a4653\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5086422d5107d7bba0da4b6f9b3e75966b3ee5833d93e8f5baa87e59f99e4578\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:27Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.077358 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.077399 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.077434 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.077455 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.077470 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:27Z","lastTransitionTime":"2025-11-26T00:08:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.086812 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:27Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.100681 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:27Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.111840 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jhkp7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b640f09-f96d-4197-97be-f50e211e484d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wmjfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wmjfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:41Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jhkp7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:27Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.124892 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8wvqp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8403934b57b62597733ce8e578eef53784b1e5a39eba7e4fb8765870a3690424\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e107494e5b30ef73730bc20a4b4ae86c83caf1590f6a8044d1892e424e1559a8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T00:08:15Z\\\",\\\"message\\\":\\\"2025-11-26T00:07:29+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_1493b82f-90d3-4212-862b-1aab5d292ac2\\\\n2025-11-26T00:07:30+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_1493b82f-90d3-4212-862b-1aab5d292ac2 to /host/opt/cni/bin/\\\\n2025-11-26T00:07:30Z [verbose] multus-daemon started\\\\n2025-11-26T00:07:30Z [verbose] Readiness Indicator file check\\\\n2025-11-26T00:08:15Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:08:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22mhs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8wvqp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:27Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.144963 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c771ff97-2ac6-43b4-976e-06fb4909c83b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a0c30f49af86d8affd5039728fe2adf22324b93d617dd5c63dfc73aed0ed70e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f839a108e04ab5f68757b2f5661fe0643d88bbd33493cc800d9c6826d0e69c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef02df42efda33d3f123cfe925bb0d1adab00fecbfd8711962c4bdf458898e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7650f30df5f9f908d3dc068e88e04892dce7a8
8f63ecc6a52901a36ba7bed5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aac38d8e30fa7e53d316dd4092f7210c7976bd44f77d2a21ab0ed53e1f6c69f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:27Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.164016 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0dde34d63f530305d33e43053c6475f1e1a5a794722700a85758a4d3d3ef5ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:27Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.179094 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5231f747090cf07f92f8f56d3c43191b581098758208e6901b28de04113ab0c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:27Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.180440 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.180474 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.180486 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.180505 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.180516 4875 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:27Z","lastTransitionTime":"2025-11-26T00:08:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.198457 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a19f36bfb94e5e0000f6e930c5160e799d4a3e334d2b119dcc6879fac1a4a98e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9dfa9b8751b0d639d299539f76b3a57c46b730d3960851344162019cb4843f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:27Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.212517 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hjshz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e5a9914-f410-4b51-8e16-77e18f7b09fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a43439ddb6f096b37261ab045b6a3ed923eefecb9c3122b00a87517605d0aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mm8dm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hjshz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:27Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.224874 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://546875ee0f2f4855eb47134061708606927a379fcc7e1c3492cffcc519bbb542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdf274332d9e071cabf1993c37531ebc66300c965dd736a4795252168efc371\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9mztb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:27Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.240680 4875 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02732a7d-257e-4d74-8a08-cea35074fc4e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab59be8e3f5b8bfceb2d6bdb642d03f2049be71e3acd91d7b5dd155e471f7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qvrsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:27Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.252906 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qhmvp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"40b82738-34d2-4c2f-99c3-ae1198e964dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c52a486d839058df608b79c0f2e01f157347ddefbcefdda59cb44b073c1f776c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6mk2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b83a4af8fd03aab54351d9dc3f4b0f9a203aab768023dd1a416361d746473cdb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6mk2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:40Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qhmvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:27Z is after 2025-08-24T17:21:41Z" Nov 26 
00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.283588 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.283635 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.283649 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.283669 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.283686 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:27Z","lastTransitionTime":"2025-11-26T00:08:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.386619 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.386668 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.386681 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.386701 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.386718 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:27Z","lastTransitionTime":"2025-11-26T00:08:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.489966 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.490048 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.490070 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.490100 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.490120 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:27Z","lastTransitionTime":"2025-11-26T00:08:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.528032 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.528102 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.528127 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:08:27 crc kubenswrapper[4875]: E1126 00:08:27.528183 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:08:27 crc kubenswrapper[4875]: E1126 00:08:27.528254 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:08:27 crc kubenswrapper[4875]: E1126 00:08:27.528350 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.543627 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://546875ee0f2f4855eb47134061708606927a379fcc7e1c3492cffcc519bbb542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdf274332d9e071cabf1993c37531ebc66300c965dd736a4795252168efc371\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9mztb\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:27Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.556507 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02732a7d-257e-4d74-8a08-cea35074fc4e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab59be8e3f5b8bfceb2d6bdb642d03f2049be71e3acd91d7b5dd155e471f7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.
io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"exitCode\\\":0,\\\
"finishedAt\\\":\\\"2025-11-26T00:07:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qvrsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has 
expired or is not yet valid: current time 2025-11-26T00:08:27Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.566690 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qhmvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"40b82738-34d2-4c2f-99c3-ae1198e964dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c52a486d839058df608b79c0f2e01f157347ddefbcefdda59cb44b073c1f776c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6mk2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b83a4af8fd03aab54351d9dc3f4b0f9a203aab768023dd1a416361d746473cdb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6mk2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:40Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qhmvp\": 
Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:27Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.578513 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c8a175-7cc7-4e45-8e37-d431994e5526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":tr
ue,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T00:07:26Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1126 00:07:11.241353 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 00:07:11.243535 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-775517230/tls.crt::/tmp/serving-cert-775517230/tls.key\\\\\\\"\\\\nI1126 00:07:26.715442 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 00:07:26.718387 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 00:07:26.718433 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 00:07:26.718455 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 00:07:26.718462 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 00:07:26.727473 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 00:07:26.727505 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727510 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727513 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 00:07:26.727516 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 00:07:26.727520 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 00:07:26.727524 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 00:07:26.727732 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 00:07:26.730962 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:27Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.587660 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d127797-c509-4d81-950c-2e4bc7f82786\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ec22a9fda336e3f85bcbaf3c8fee2d1096b9f9eaede26d1246858bf393ad5b55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fffb96f456bc7cb372dceab2ad7310da04e2487d43530da595c2b865ec8e02a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11193f8290474fdb88f54086c6e42c01f4630df442b28819c4917165ef46a5cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34b0e5244d5a03f2b673959e7a5d9ec63a0d3a77dc9915cc1e1e0d73cdcfabee\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34b0e5244d5a03f2b673959e7a5d9ec63a0d3a77dc9915cc1e1e0d73cdcfabee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:27Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.592507 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.592531 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.592539 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.592552 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.592562 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:27Z","lastTransitionTime":"2025-11-26T00:08:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.601094 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:27Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.626227 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe08e0b-f339-4beb-953e-6e0180c6c945\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name
\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\
"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e1e37ff34e8016d35de4328475cea8cef96f21dc241d6ab0dfa8c068c38059d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://93c48708c135563d79026fd18dbd330523d6cce108cf7f871fb6553ea12e7025\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T00:07:55Z\\\",\\\"message\\\":\\\"uster\\\\\\\", UUID:\\\\\\\"2a3fb1a3-a476-4e14-bcf5-fb79af60206a\\\\\\\", Protocol:\\\\\\\"tcp\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:false, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{}, Templates:services.TemplateMap{}, Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}, built lbs: []services.LB{services.LB{Name:\\\\\\\"Service_openshift-service-ca-operator/metrics_TCP_cluster\\\\\\\", UUID:\\\\\\\"\\\\\\\", Protocol:\\\\\\\"TCP\\\\\\\", ExternalIDs:map[string]string{\\\\\\\"k8s.ovn.org/kind\\\\\\\":\\\\\\\"Service\\\\\\\", \\\\\\\"k8s.ovn.org/owner\\\\\\\":\\\\\\\"openshift-service-ca-operator/metrics\\\\\\\"}, Opts:services.LBOpts{Reject:true, EmptyLBEvents:false, AffinityTimeOut:0, SkipSNAT:false, Template:false, AddressFamily:\\\\\\\"\\\\\\\"}, Rules:[]services.LBRule{services.LBRule{Source:services.Addr{IP:\\\\\\\"10.217.4.40\\\\\\\", Port:443, Template:(*services.Template)(nil)}, Targets:[]services.Addr{}}}, Templates:services.TemplateMap(nil), Switches:[]string{}, Routers:[]string{}, Groups:[]string{\\\\\\\"clusterLBGroup\\\\\\\"}}}\\\\nI1126 00:07:55.341064 6494 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 00:07:55.341125 6494 ovnkube.go:137] failed to run ovnkube: [failed to start network controller: failed to start default network controller: unable to create\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:54Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e1e37ff34e8016d35de4328475cea8cef96f21dc241d6ab0dfa8c068c38059d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T00:08:26Z\\\",\\\"message\\\":\\\"ssIP event handler 8\\\\nI1126 00:08:26.489054 6866 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 00:08:26.489070 6866 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 00:08:26.489032 6866 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 00:08:26.489082 6866 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 00:08:26.489101 6866 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 00:08:26.489148 6866 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1126 00:08:26.489202 6866 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1126 00:08:26.489201 6866 handler.go:208] 
Removed *v1.Namespace event handler 1\\\\nI1126 00:08:26.489235 6866 factory.go:656] Stopping watch factory\\\\nI1126 00:08:26.489259 6866 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1126 00:08:26.489294 6866 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1126 00:08:26.489847 6866 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1126 00:08:26.490022 6866 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1126 00:08:26.490104 6866 ovnkube.go:599] Stopped ovnkube\\\\nI1126 00:08:26.490210 6866 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 00:08:26.490449 6866 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:08:25Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\"
:\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hh8lx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:27Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.636494 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tvsgw" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"be016325-bd90-4293-9d92-bc8cfba8a463\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fecdc3efe4f0230352d90992e44266359ca7129f3654b43186f6e81bbe72ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97pr5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tvsgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:27Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.648876 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6e20b72-5757-4560-ab3e-61ad3a451be8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b34767b95b984666c42ed51106c7d43766e36ce3f5f4e743d8a40451536f82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dc301396f467d1c7d0af549fa7bf7daf1542a2aa87bf3ece820ce8e21a933d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9641d3d437fd7125bbf21d140d2e6853aed997e53f3f6be8133ce94c0d4a4653\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5086422d5107d7bba0da4b6f9b3e75966b3ee5833d93e8f5baa87e59f99e4578\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:27Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.660264 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:27Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.671520 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:27Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.682506 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jhkp7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b640f09-f96d-4197-97be-f50e211e484d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wmjfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wmjfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:41Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jhkp7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:27Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.695509 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.695537 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.695547 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.695561 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.695571 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:27Z","lastTransitionTime":"2025-11-26T00:08:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.699324 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c771ff97-2ac6-43b4-976e-06fb4909c83b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a0c30f49af86d8affd5039728fe2adf22324b93d617dd5c63dfc73aed0ed70e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f839a108e04ab5f68757b2f5661fe0643d88bbd33493cc800d9c6826d0e69c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef02df42efda33d3f123cfe925bb0d1adab00fecbfd8711962c4bdf458898e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\
":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7650f30df5f9f908d3dc068e88e04892dce7a88f63ecc6a52901a36ba7bed5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aac38d8e30fa7e53d316dd4092f7210c7976bd44f77d2a21ab0ed53e1f6c69f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"exitCode\\\":0,\\\"finished
At\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:27Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.713506 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0dde34d63f530305d33e43053c6475f1e1a5a794722700a85758a4d3d3ef5ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:27Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.726690 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5231f747090cf07f92f8f56d3c43191b581098758208e6901b28de04113ab0c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:27Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.738751 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a19f36bfb94e5e0000f6e930c5160e799d4a3e334d2b119dcc6879fac1a4a98e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9dfa9b8751b0d639d299539f76b3a57c46b730d3960851344162019cb4843f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:27Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.748111 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hjshz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e5a9914-f410-4b51-8e16-77e18f7b09fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a43439ddb6f096b37261ab045b6a3ed923eefecb9c3122b00a87517605d0aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mm8dm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hjshz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:27Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.760383 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8wvqp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8403934b57b62597733ce8e578eef53784b1e5a39eba7e4fb8765870a3690424\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e107494e5b30ef73730bc20a4b4ae86c83caf1590f6a8044d1892e424e1559a8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T00:08:15Z\\\",\\\"message\\\":\\\"2025-11-26T00:07:29+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_1493b82f-90d3-4212-862b-1aab5d292ac2\\\\n2025-11-26T00:07:30+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_1493b82f-90d3-4212-862b-1aab5d292ac2 to /host/opt/cni/bin/\\\\n2025-11-26T00:07:30Z [verbose] multus-daemon started\\\\n2025-11-26T00:07:30Z [verbose] Readiness Indicator file check\\\\n2025-11-26T00:08:15Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:08:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22mhs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8wvqp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:27Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.800190 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.800454 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.800556 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.800633 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.800699 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:27Z","lastTransitionTime":"2025-11-26T00:08:27Z","reason":"KubeletNotReady","message":"container 
runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.902579 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.902832 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.902944 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.903098 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.903160 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:27Z","lastTransitionTime":"2025-11-26T00:08:27Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.961516 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hh8lx_8fe08e0b-f339-4beb-953e-6e0180c6c945/ovnkube-controller/3.log" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.964853 4875 scope.go:117] "RemoveContainer" containerID="2e1e37ff34e8016d35de4328475cea8cef96f21dc241d6ab0dfa8c068c38059d" Nov 26 00:08:27 crc kubenswrapper[4875]: E1126 00:08:27.965063 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-hh8lx_openshift-ovn-kubernetes(8fe08e0b-f339-4beb-953e-6e0180c6c945)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.981335 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c771ff97-2ac6-43b4-976e-06fb4909c83b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a0c30f49af86d8affd5039728fe2adf22324b93d617dd5c63dfc73aed0ed70e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f839a108e04ab5f68757b2f5661fe0643d88bbd33493cc800d9c6826d0e69c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef02df42efda33d3f123cfe925bb0d1adab00fecbfd8711962c4bdf458898e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7650f30df5f9f908d3dc068e88e04892dce7a8
8f63ecc6a52901a36ba7bed5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aac38d8e30fa7e53d316dd4092f7210c7976bd44f77d2a21ab0ed53e1f6c69f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:27Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:27 crc kubenswrapper[4875]: I1126 00:08:27.993869 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0dde34d63f530305d33e43053c6475f1e1a5a794722700a85758a4d3d3ef5ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:27Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.004019 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5231f747090cf07f92f8f56d3c43191b581098758208e6901b28de04113ab0c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:28Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.006392 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.006456 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.006468 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.006483 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.006500 4875 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:28Z","lastTransitionTime":"2025-11-26T00:08:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.020258 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a19f36bfb94e5e0000f6e930c5160e799d4a3e334d2b119dcc6879fac1a4a98e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9dfa9b8751b0d639d299539f76b3a57c46b730d3960851344162019cb4843f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify 
certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:28Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.033195 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hjshz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e5a9914-f410-4b51-8e16-77e18f7b09fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a43439ddb6f096b37261ab045b6a3ed923eefecb9c3122b00a87517605d0aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mm8dm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hjshz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:28Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.049777 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8wvqp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8403934b57b62597733ce8e578eef53784b1e5a39eba7e4fb8765870a3690424\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e107494e5b30ef73730bc20a4b4ae86c83caf1590f6a8044d1892e424e1559a8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T00:08:15Z\\\",\\\"message\\\":\\\"2025-11-26T00:07:29+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_1493b82f-90d3-4212-862b-1aab5d292ac2\\\\n2025-11-26T00:07:30+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_1493b82f-90d3-4212-862b-1aab5d292ac2 to /host/opt/cni/bin/\\\\n2025-11-26T00:07:30Z [verbose] multus-daemon started\\\\n2025-11-26T00:07:30Z [verbose] Readiness Indicator file check\\\\n2025-11-26T00:08:15Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:08:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22mhs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8wvqp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:28Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.063396 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://546875ee0f2f4855eb47134061708606927a379fcc7e1c3492cffcc519bbb542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdf274332d9e071cabf1993c37531ebc66300c965dd736a4795252168efc371\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9mztb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:28Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.076166 4875 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02732a7d-257e-4d74-8a08-cea35074fc4e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab59be8e3f5b8bfceb2d6bdb642d03f2049be71e3acd91d7b5dd155e471f7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qvrsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:28Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.087137 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qhmvp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"40b82738-34d2-4c2f-99c3-ae1198e964dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c52a486d839058df608b79c0f2e01f157347ddefbcefdda59cb44b073c1f776c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6mk2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b83a4af8fd03aab54351d9dc3f4b0f9a203aab768023dd1a416361d746473cdb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6mk2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:40Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qhmvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:28Z is after 2025-08-24T17:21:41Z" Nov 26 
00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.099804 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c8a175-7cc7-4e45-8e37-d431994e5526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\
\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T00:07:26Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1126 00:07:11.241353 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 00:07:11.243535 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-775517230/tls.crt::/tmp/serving-cert-775517230/tls.key\\\\\\\"\\\\nI1126 00:07:26.715442 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 00:07:26.718387 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 00:07:26.718433 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 00:07:26.718455 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 00:07:26.718462 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 00:07:26.727473 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 00:07:26.727505 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727510 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727513 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 00:07:26.727516 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 00:07:26.727520 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 00:07:26.727524 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 00:07:26.727732 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 00:07:26.730962 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:28Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.108396 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.108451 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.108463 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.108480 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.108491 4875 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:28Z","lastTransitionTime":"2025-11-26T00:08:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.109811 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d127797-c509-4d81-950c-2e4bc7f82786\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ec22a9fda336e3f85bcbaf3c8fee2d1096b9f9eaede26d1246858bf393ad5b55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fffb96f456bc7cb372dceab2ad7310da04e2487d43530da595c2b865ec8e02a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11193f8290474fdb88f54086c6e42c01f4630df442b28819c4917165ef46a5cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controlle
r\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34b0e5244d5a03f2b673959e7a5d9ec63a0d3a77dc9915cc1e1e0d73cdcfabee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34b0e5244d5a03f2b673959e7a5d9ec63a0d3a77dc9915cc1e1e0d73cdcfabee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:28Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.120868 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:28Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.137601 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe08e0b-f339-4beb-953e-6e0180c6c945\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e1e37ff34e8016d35de4328475cea8cef96f21d
c241d6ab0dfa8c068c38059d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e1e37ff34e8016d35de4328475cea8cef96f21dc241d6ab0dfa8c068c38059d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T00:08:26Z\\\",\\\"message\\\":\\\"ssIP event handler 8\\\\nI1126 00:08:26.489054 6866 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 00:08:26.489070 6866 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 00:08:26.489032 6866 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 00:08:26.489082 6866 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 00:08:26.489101 6866 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 00:08:26.489148 6866 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1126 00:08:26.489202 6866 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1126 00:08:26.489201 6866 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 00:08:26.489235 6866 factory.go:656] Stopping watch factory\\\\nI1126 00:08:26.489259 6866 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1126 00:08:26.489294 6866 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1126 00:08:26.489847 6866 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1126 00:08:26.490022 6866 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1126 00:08:26.490104 6866 ovnkube.go:599] Stopped ovnkube\\\\nI1126 00:08:26.490210 6866 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 00:08:26.490449 6866 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:08:25Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-hh8lx_openshift-ovn-kubernetes(8fe08e0b-f339-4beb-953e-6e0180c6c945)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hh8lx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:28Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.148037 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tvsgw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be016325-bd90-4293-9d92-bc8cfba8a463\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fecdc3efe4f0230352d90992e44266359ca7129f3654b43186f6e81bbe72ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97pr5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tvsgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:28Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.158649 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6e20b72-5757-4560-ab3e-61ad3a451be8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b34767b95b984666c42ed51106c7d43766e36ce3f5f4e743d8a40451536f82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dc301396f467d1c7d0af549fa7bf7daf1542a2aa87bf3ece820ce8e21a933d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9641d3d437fd7125bbf21d140d2e6853aed997e53f3f6be8133ce94c0d4a4653\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-
manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5086422d5107d7bba0da4b6f9b3e75966b3ee5833d93e8f5baa87e59f99e4578\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:28Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.169340 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: 
[network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:28Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.179552 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:28Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.188722 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jhkp7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b640f09-f96d-4197-97be-f50e211e484d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wmjfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wmjfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:41Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jhkp7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:28Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.210402 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.210477 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.210485 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.210502 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.210511 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:28Z","lastTransitionTime":"2025-11-26T00:08:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.312961 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.313009 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.313018 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.313034 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.313044 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:28Z","lastTransitionTime":"2025-11-26T00:08:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.414966 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.415205 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.415322 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.415503 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.415595 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:28Z","lastTransitionTime":"2025-11-26T00:08:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.517796 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.517833 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.517843 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.517858 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.517868 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:28Z","lastTransitionTime":"2025-11-26T00:08:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.528116 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:08:28 crc kubenswrapper[4875]: E1126 00:08:28.528268 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.620703 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.620746 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.620757 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.620773 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.620783 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:28Z","lastTransitionTime":"2025-11-26T00:08:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.723597 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.723684 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.723710 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.723740 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.723766 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:28Z","lastTransitionTime":"2025-11-26T00:08:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.826494 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.826555 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.826573 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.826596 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.826614 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:28Z","lastTransitionTime":"2025-11-26T00:08:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.929348 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.929385 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.929396 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.929425 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:28 crc kubenswrapper[4875]: I1126 00:08:28.929434 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:28Z","lastTransitionTime":"2025-11-26T00:08:28Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.032556 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.032648 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.032676 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.032719 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.032745 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:29Z","lastTransitionTime":"2025-11-26T00:08:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.135258 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.135314 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.135328 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.135345 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.135358 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:29Z","lastTransitionTime":"2025-11-26T00:08:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.238144 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.238184 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.238196 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.238211 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.238222 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:29Z","lastTransitionTime":"2025-11-26T00:08:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.340680 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.340757 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.340781 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.340811 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.340835 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:29Z","lastTransitionTime":"2025-11-26T00:08:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.443655 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.443707 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.443718 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.443736 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.443747 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:29Z","lastTransitionTime":"2025-11-26T00:08:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.528996 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.529066 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.528996 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:08:29 crc kubenswrapper[4875]: E1126 00:08:29.529243 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:08:29 crc kubenswrapper[4875]: E1126 00:08:29.529375 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:08:29 crc kubenswrapper[4875]: E1126 00:08:29.529507 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.545467 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.545505 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.545516 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.545531 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.545543 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:29Z","lastTransitionTime":"2025-11-26T00:08:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.648740 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.648786 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.648800 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.648819 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.648831 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:29Z","lastTransitionTime":"2025-11-26T00:08:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.751640 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.751688 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.751700 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.751718 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.751729 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:29Z","lastTransitionTime":"2025-11-26T00:08:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.854718 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.854752 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.854760 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.854773 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.854784 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:29Z","lastTransitionTime":"2025-11-26T00:08:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.957334 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.957422 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.957432 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.957446 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:29 crc kubenswrapper[4875]: I1126 00:08:29.957456 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:29Z","lastTransitionTime":"2025-11-26T00:08:29Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.060115 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.060157 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.060168 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.060185 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.060196 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:30Z","lastTransitionTime":"2025-11-26T00:08:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.161832 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.161869 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.161878 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.161892 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.161903 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:30Z","lastTransitionTime":"2025-11-26T00:08:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.264353 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.264401 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.264428 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.264441 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.264451 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:30Z","lastTransitionTime":"2025-11-26T00:08:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.366949 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.366983 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.366991 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.367005 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.367013 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:30Z","lastTransitionTime":"2025-11-26T00:08:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.469138 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.469171 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.469180 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.469192 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.469200 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:30Z","lastTransitionTime":"2025-11-26T00:08:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.528243 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:08:30 crc kubenswrapper[4875]: E1126 00:08:30.528389 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.571663 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.571730 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.571747 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.571777 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.571792 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:30Z","lastTransitionTime":"2025-11-26T00:08:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.674083 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.674129 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.674141 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.674157 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.674170 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:30Z","lastTransitionTime":"2025-11-26T00:08:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.777297 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.777325 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.777333 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.777347 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.777356 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:30Z","lastTransitionTime":"2025-11-26T00:08:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.879474 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.879522 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.879534 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.879553 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.879565 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:30Z","lastTransitionTime":"2025-11-26T00:08:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.982172 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.982237 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.982253 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.982275 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:30 crc kubenswrapper[4875]: I1126 00:08:30.982287 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:30Z","lastTransitionTime":"2025-11-26T00:08:30Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.085488 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.085540 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.085560 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.085579 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.085593 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:31Z","lastTransitionTime":"2025-11-26T00:08:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.188483 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.188517 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.188528 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.188544 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.188554 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:31Z","lastTransitionTime":"2025-11-26T00:08:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.249054 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:08:31 crc kubenswrapper[4875]: E1126 00:08:31.249795 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:35.249771723 +0000 UTC m=+148.439747428 (durationBeforeRetry 1m4s). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.291236 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.291478 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.291500 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.291522 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.291538 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:31Z","lastTransitionTime":"2025-11-26T00:08:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.351172 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.351242 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.351285 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.351346 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:08:31 crc kubenswrapper[4875]: E1126 00:08:31.351386 4875 configmap.go:193] Couldn't get configMap openshift-network-console/networking-console-plugin: object 
"openshift-network-console"/"networking-console-plugin" not registered Nov 26 00:08:31 crc kubenswrapper[4875]: E1126 00:08:31.351441 4875 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 00:08:31 crc kubenswrapper[4875]: E1126 00:08:31.351465 4875 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 00:08:31 crc kubenswrapper[4875]: E1126 00:08:31.351479 4875 projected.go:194] Error preparing data for projected volume kube-api-access-cqllr for pod openshift-network-diagnostics/network-check-target-xd92c: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 00:08:31 crc kubenswrapper[4875]: E1126 00:08:31.351484 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 00:09:35.351466519 +0000 UTC m=+148.541442214 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "nginx-conf" (UniqueName: "kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin" not registered Nov 26 00:08:31 crc kubenswrapper[4875]: E1126 00:08:31.351498 4875 secret.go:188] Couldn't get secret openshift-network-console/networking-console-plugin-cert: object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 00:08:31 crc kubenswrapper[4875]: E1126 00:08:31.351548 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr podName:3b6479f0-333b-4a96-9adf-2099afdc2447 nodeName:}" failed. No retries permitted until 2025-11-26 00:09:35.35150726 +0000 UTC m=+148.541482965 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-cqllr" (UniqueName: "kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr") pod "network-check-target-xd92c" (UID: "3b6479f0-333b-4a96-9adf-2099afdc2447") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 00:08:31 crc kubenswrapper[4875]: E1126 00:08:31.351574 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert podName:5fe485a1-e14f-4c09-b5b9-f252bc42b7e8 nodeName:}" failed. No retries permitted until 2025-11-26 00:09:35.351557451 +0000 UTC m=+148.541533156 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "networking-console-plugin-cert" (UniqueName: "kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert") pod "networking-console-plugin-85b44fc459-gdk6g" (UID: "5fe485a1-e14f-4c09-b5b9-f252bc42b7e8") : object "openshift-network-console"/"networking-console-plugin-cert" not registered Nov 26 00:08:31 crc kubenswrapper[4875]: E1126 00:08:31.351581 4875 projected.go:288] Couldn't get configMap openshift-network-diagnostics/kube-root-ca.crt: object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered Nov 26 00:08:31 crc kubenswrapper[4875]: E1126 00:08:31.351608 4875 projected.go:288] Couldn't get configMap openshift-network-diagnostics/openshift-service-ca.crt: object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered Nov 26 00:08:31 crc kubenswrapper[4875]: E1126 00:08:31.351628 4875 projected.go:194] Error preparing data for projected volume kube-api-access-s2dwl for pod openshift-network-diagnostics/network-check-source-55646444c4-trplf: [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 00:08:31 crc kubenswrapper[4875]: E1126 00:08:31.351697 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl podName:9d751cbb-f2e2-430d-9754-c882a5e924a5 nodeName:}" failed. No retries permitted until 2025-11-26 00:09:35.351678814 +0000 UTC m=+148.541654549 (durationBeforeRetry 1m4s). Error: MountVolume.SetUp failed for volume "kube-api-access-s2dwl" (UniqueName: "kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl") pod "network-check-source-55646444c4-trplf" (UID: "9d751cbb-f2e2-430d-9754-c882a5e924a5") : [object "openshift-network-diagnostics"/"kube-root-ca.crt" not registered, object "openshift-network-diagnostics"/"openshift-service-ca.crt" not registered] Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.394546 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.394588 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.394601 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.394619 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.394630 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:31Z","lastTransitionTime":"2025-11-26T00:08:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.497181 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.497217 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.497226 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.497240 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.497249 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:31Z","lastTransitionTime":"2025-11-26T00:08:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.528743 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.528790 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:08:31 crc kubenswrapper[4875]: E1126 00:08:31.528877 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.528921 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:08:31 crc kubenswrapper[4875]: E1126 00:08:31.528996 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:08:31 crc kubenswrapper[4875]: E1126 00:08:31.529477 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.599187 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.599214 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.599222 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.599271 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.599281 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:31Z","lastTransitionTime":"2025-11-26T00:08:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.701711 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.701738 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.701745 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.701758 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.701766 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:31Z","lastTransitionTime":"2025-11-26T00:08:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.805370 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.805437 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.805448 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.805463 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.805472 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:31Z","lastTransitionTime":"2025-11-26T00:08:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.907721 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.907774 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.907783 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.907797 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:31 crc kubenswrapper[4875]: I1126 00:08:31.907807 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:31Z","lastTransitionTime":"2025-11-26T00:08:31Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.009967 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.010036 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.010047 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.010064 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.010078 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:32Z","lastTransitionTime":"2025-11-26T00:08:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.112667 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.112732 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.112743 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.112757 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.112767 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:32Z","lastTransitionTime":"2025-11-26T00:08:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.215394 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.215463 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.215473 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.215491 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.215501 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:32Z","lastTransitionTime":"2025-11-26T00:08:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.317763 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.317805 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.317816 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.317831 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.317842 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:32Z","lastTransitionTime":"2025-11-26T00:08:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.419897 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.419940 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.419950 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.419965 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.419974 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:32Z","lastTransitionTime":"2025-11-26T00:08:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.522117 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.522170 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.522180 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.522192 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.522200 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:32Z","lastTransitionTime":"2025-11-26T00:08:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.528370 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:08:32 crc kubenswrapper[4875]: E1126 00:08:32.528502 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.624886 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.624955 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.624968 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.624983 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.624993 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:32Z","lastTransitionTime":"2025-11-26T00:08:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.727120 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.727157 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.727167 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.727367 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.727379 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:32Z","lastTransitionTime":"2025-11-26T00:08:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.829984 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.830043 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.830053 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.830067 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.830077 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:32Z","lastTransitionTime":"2025-11-26T00:08:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.933302 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.933345 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.933353 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.933368 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:32 crc kubenswrapper[4875]: I1126 00:08:32.933377 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:32Z","lastTransitionTime":"2025-11-26T00:08:32Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.036505 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.036581 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.036591 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.036605 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.036614 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:33Z","lastTransitionTime":"2025-11-26T00:08:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.139013 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.139051 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.139059 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.139074 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.139086 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:33Z","lastTransitionTime":"2025-11-26T00:08:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.240978 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.241054 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.241069 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.241086 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.241098 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:33Z","lastTransitionTime":"2025-11-26T00:08:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.343506 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.343552 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.343561 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.343576 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.343587 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:33Z","lastTransitionTime":"2025-11-26T00:08:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.446693 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.446731 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.446742 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.446762 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.446773 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:33Z","lastTransitionTime":"2025-11-26T00:08:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.528773 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:08:33 crc kubenswrapper[4875]: E1126 00:08:33.528891 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.528773 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.528792 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:08:33 crc kubenswrapper[4875]: E1126 00:08:33.529239 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:08:33 crc kubenswrapper[4875]: E1126 00:08:33.529694 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.548804 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.548833 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.548842 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.548855 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.548864 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:33Z","lastTransitionTime":"2025-11-26T00:08:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.651274 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.651316 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.651325 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.651340 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.651349 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:33Z","lastTransitionTime":"2025-11-26T00:08:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.754100 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.754165 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.754181 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.754204 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.754223 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:33Z","lastTransitionTime":"2025-11-26T00:08:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.856256 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.856299 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.856320 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.856340 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.856351 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:33Z","lastTransitionTime":"2025-11-26T00:08:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.958724 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.958768 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.958780 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.958796 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:33 crc kubenswrapper[4875]: I1126 00:08:33.958807 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:33Z","lastTransitionTime":"2025-11-26T00:08:33Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.061621 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.061663 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.061676 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.061697 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.061718 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:34Z","lastTransitionTime":"2025-11-26T00:08:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.164403 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.164467 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.164478 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.164496 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.164509 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:34Z","lastTransitionTime":"2025-11-26T00:08:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.266927 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.266995 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.267012 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.267037 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.267053 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:34Z","lastTransitionTime":"2025-11-26T00:08:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.370261 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.370317 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.370337 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.370358 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.370404 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:34Z","lastTransitionTime":"2025-11-26T00:08:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.473288 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.473336 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.473347 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.473365 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.473377 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:34Z","lastTransitionTime":"2025-11-26T00:08:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.528732 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:08:34 crc kubenswrapper[4875]: E1126 00:08:34.528863 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.575727 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.575777 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.575791 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.575811 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.575825 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:34Z","lastTransitionTime":"2025-11-26T00:08:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.678623 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.678777 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.678804 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.678837 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.678858 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:34Z","lastTransitionTime":"2025-11-26T00:08:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.782097 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.782150 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.782184 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.782201 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.782230 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:34Z","lastTransitionTime":"2025-11-26T00:08:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.884663 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.884711 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.884721 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.884736 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.884745 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:34Z","lastTransitionTime":"2025-11-26T00:08:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.986999 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.987081 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.987101 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.987125 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:34 crc kubenswrapper[4875]: I1126 00:08:34.987142 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:34Z","lastTransitionTime":"2025-11-26T00:08:34Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.089054 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.089089 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.089098 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.089111 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.089119 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:35Z","lastTransitionTime":"2025-11-26T00:08:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.191237 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.191331 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.191342 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.191359 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.191371 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:35Z","lastTransitionTime":"2025-11-26T00:08:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.293764 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.293808 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.293817 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.293831 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.293842 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:35Z","lastTransitionTime":"2025-11-26T00:08:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.389198 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.389236 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.389246 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.389262 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.389271 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:35Z","lastTransitionTime":"2025-11-26T00:08:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:35 crc kubenswrapper[4875]: E1126 00:08:35.401206 4875 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e98cdd5c-4f13-4403-b492-aac02592afb8\\\",\\\"systemUUID\\\":\\\"9f6d4e71-289a-40d7-bad1-b955dbea75de\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:35Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.405066 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.405147 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.405175 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.405209 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.405234 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:35Z","lastTransitionTime":"2025-11-26T00:08:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:35 crc kubenswrapper[4875]: E1126 00:08:35.420001 4875 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e98cdd5c-4f13-4403-b492-aac02592afb8\\\",\\\"systemUUID\\\":\\\"9f6d4e71-289a-40d7-bad1-b955dbea75de\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:35Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.424437 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.424481 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.424494 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.424513 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.424525 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:35Z","lastTransitionTime":"2025-11-26T00:08:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:35 crc kubenswrapper[4875]: E1126 00:08:35.436802 4875 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e98cdd5c-4f13-4403-b492-aac02592afb8\\\",\\\"systemUUID\\\":\\\"9f6d4e71-289a-40d7-bad1-b955dbea75de\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:35Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.440460 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.440499 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.440513 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.440530 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.440541 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:35Z","lastTransitionTime":"2025-11-26T00:08:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:35 crc kubenswrapper[4875]: E1126 00:08:35.452207 4875 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e98cdd5c-4f13-4403-b492-aac02592afb8\\\",\\\"systemUUID\\\":\\\"9f6d4e71-289a-40d7-bad1-b955dbea75de\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:35Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.455731 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.455766 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.455777 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.455793 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.455804 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:35Z","lastTransitionTime":"2025-11-26T00:08:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:35 crc kubenswrapper[4875]: E1126 00:08:35.472301 4875 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:35Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:35Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:35Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:35Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e98cdd5c-4f13-4403-b492-aac02592afb8\\\",\\\"systemUUID\\\":\\\"9f6d4e71-289a-40d7-bad1-b955dbea75de\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:35Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:35 crc kubenswrapper[4875]: E1126 00:08:35.472406 4875 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.473838 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.473870 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.473878 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.473891 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.473900 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:35Z","lastTransitionTime":"2025-11-26T00:08:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.528795 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:08:35 crc kubenswrapper[4875]: E1126 00:08:35.528951 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.528826 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.529058 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:08:35 crc kubenswrapper[4875]: E1126 00:08:35.529098 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:08:35 crc kubenswrapper[4875]: E1126 00:08:35.529237 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.575955 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.575996 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.576007 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.576025 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.576036 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:35Z","lastTransitionTime":"2025-11-26T00:08:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.678466 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.678497 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.678507 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.678520 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.678530 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:35Z","lastTransitionTime":"2025-11-26T00:08:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.781019 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.781057 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.781066 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.781083 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.781096 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:35Z","lastTransitionTime":"2025-11-26T00:08:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.883905 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.883951 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.883967 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.883990 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.884005 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:35Z","lastTransitionTime":"2025-11-26T00:08:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.988535 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.988588 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.988600 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.988619 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:35 crc kubenswrapper[4875]: I1126 00:08:35.988635 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:35Z","lastTransitionTime":"2025-11-26T00:08:35Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.092008 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.092078 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.092095 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.092114 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.092129 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:36Z","lastTransitionTime":"2025-11-26T00:08:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.201486 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.201581 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.201608 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.201646 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.201672 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:36Z","lastTransitionTime":"2025-11-26T00:08:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.305363 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.305423 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.305436 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.305452 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.305460 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:36Z","lastTransitionTime":"2025-11-26T00:08:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.407728 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.407785 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.407804 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.407828 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.407845 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:36Z","lastTransitionTime":"2025-11-26T00:08:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.510859 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.510896 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.510904 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.510919 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.510929 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:36Z","lastTransitionTime":"2025-11-26T00:08:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.528475 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:08:36 crc kubenswrapper[4875]: E1126 00:08:36.528636 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.613867 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.613911 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.613923 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.613957 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.613967 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:36Z","lastTransitionTime":"2025-11-26T00:08:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.716252 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.716297 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.716308 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.716324 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.716335 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:36Z","lastTransitionTime":"2025-11-26T00:08:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.819242 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.819279 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.819291 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.819310 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.819321 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:36Z","lastTransitionTime":"2025-11-26T00:08:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.921991 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.922058 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.922076 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.922103 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:36 crc kubenswrapper[4875]: I1126 00:08:36.922122 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:36Z","lastTransitionTime":"2025-11-26T00:08:36Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.024772 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.024843 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.024865 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.024890 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.024907 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:37Z","lastTransitionTime":"2025-11-26T00:08:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.127462 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.127560 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.127593 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.127623 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.127647 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:37Z","lastTransitionTime":"2025-11-26T00:08:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.230280 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.230322 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.230334 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.230351 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.230363 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:37Z","lastTransitionTime":"2025-11-26T00:08:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.332158 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.332207 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.332221 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.332240 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.332260 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:37Z","lastTransitionTime":"2025-11-26T00:08:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.434358 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.434401 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.434430 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.434447 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.434455 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:37Z","lastTransitionTime":"2025-11-26T00:08:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.528693 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.528787 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.528829 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:08:37 crc kubenswrapper[4875]: E1126 00:08:37.528947 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:08:37 crc kubenswrapper[4875]: E1126 00:08:37.529073 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:08:37 crc kubenswrapper[4875]: E1126 00:08:37.529131 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.536223 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.536261 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.536271 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.536333 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.536365 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:37Z","lastTransitionTime":"2025-11-26T00:08:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.539763 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/kube-rbac-proxy-crio-crc"] Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.546056 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0dde34d63f530305d33e43053c6475f1e1a5a794722700a85758a4d3d3ef5ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.556705 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5231f747090cf07f92f8f56d3c43191b581098758208e6901b28de04113ab0c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.570651 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a19f36bfb94e5e0000f6e930c5160e799d4a3e334d2b119dcc6879fac1a4a98e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9dfa9b8751b0d639d299539f76b3a57c46b730d3960851344162019cb4843f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.580247 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hjshz" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e5a9914-f410-4b51-8e16-77e18f7b09fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a43439ddb6f096b37261ab045b6a3ed923eefecb9c3122b00a87517605d0aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mm8dm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hjshz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.593754 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8wvqp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8403934b57b62597733ce8e578eef53784b1e5a39eba7e4fb8765870a3690424\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e107494e5b30ef73730bc20a4b4ae86c83caf1590f6a8044d1892e424e1559a8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T00:08:15Z\\\",\\\"message\\\":\\\"2025-11-26T00:07:29+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_1493b82f-90d3-4212-862b-1aab5d292ac2\\\\n2025-11-26T00:07:30+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_1493b82f-90d3-4212-862b-1aab5d292ac2 to /host/opt/cni/bin/\\\\n2025-11-26T00:07:30Z [verbose] multus-daemon started\\\\n2025-11-26T00:07:30Z [verbose] Readiness Indicator file check\\\\n2025-11-26T00:08:15Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:08:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22mhs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8wvqp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.612057 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c771ff97-2ac6-43b4-976e-06fb4909c83b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a0c30f49af86d8affd5039728fe2adf22324b93d617dd5c63dfc73aed0ed70e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f839a108e04ab5f68757b2f5661fe0643d88bbd33493cc800d9c6826d0e69c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef02df42efda33d3f123cfe925bb0d1adab00fecbfd8711962c4bdf458898e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7650f30df5f9f908d3dc068e88e04892dce7a8
8f63ecc6a52901a36ba7bed5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aac38d8e30fa7e53d316dd4092f7210c7976bd44f77d2a21ab0ed53e1f6c69f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.626304 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02732a7d-257e-4d74-8a08-cea35074fc4e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab59be8e3f5b8bfceb2d6bdb642d03f2049be71e3acd91d7b5dd155e471f7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostI
Ps\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"
reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473
a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qvrsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.638038 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qhmvp" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"40b82738-34d2-4c2f-99c3-ae1198e964dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c52a486d839058df608b79c0f2e01f157347ddefbcefdda59cb44b073c1f776c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6mk2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b83a4af8fd03aab54351d9dc3f
4b0f9a203aab768023dd1a416361d746473cdb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6mk2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:40Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qhmvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.638627 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.638672 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.638685 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.638702 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.638713 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:37Z","lastTransitionTime":"2025-11-26T00:08:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.647980 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://546875ee0f2f4855eb47134061708606927a379fcc7e1c3492cffcc519bbb542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdf274332d9e071cabf1993c37531ebc66300c965dd736a4795252168efc371\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9mztb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post 
\"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.660171 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d127797-c509-4d81-950c-2e4bc7f82786\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ec22a9fda336e3f85bcbaf3c8fee2d1096b9f9eaede26d1246858bf393ad5b55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fffb96f456bc7cb372dceab2ad7310da04e2487d43530da595c2b865ec8e02a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11193f8290474fdb88f54086c6e42c01f4630df442b28819c4917165ef46a5cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\"
:\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34b0e5244d5a03f2b673959e7a5d9ec63a0d3a77dc9915cc1e1e0d73cdcfabee\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34b0e5244d5a03f2b673959e7a5d9ec63a0d3a77dc9915cc1e1e0d73cdcfabee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.671291 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.688669 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe08e0b-f339-4beb-953e-6e0180c6c945\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e1e37ff34e8016d35de4328475cea8cef96f21d
c241d6ab0dfa8c068c38059d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e1e37ff34e8016d35de4328475cea8cef96f21dc241d6ab0dfa8c068c38059d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T00:08:26Z\\\",\\\"message\\\":\\\"ssIP event handler 8\\\\nI1126 00:08:26.489054 6866 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 00:08:26.489070 6866 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 00:08:26.489032 6866 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 00:08:26.489082 6866 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 00:08:26.489101 6866 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 00:08:26.489148 6866 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1126 00:08:26.489202 6866 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1126 00:08:26.489201 6866 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 00:08:26.489235 6866 factory.go:656] Stopping watch factory\\\\nI1126 00:08:26.489259 6866 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1126 00:08:26.489294 6866 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1126 00:08:26.489847 6866 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1126 00:08:26.490022 6866 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1126 00:08:26.490104 6866 ovnkube.go:599] Stopped ovnkube\\\\nI1126 00:08:26.490210 6866 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 00:08:26.490449 6866 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:08:25Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-hh8lx_openshift-ovn-kubernetes(8fe08e0b-f339-4beb-953e-6e0180c6c945)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hh8lx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.698648 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tvsgw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be016325-bd90-4293-9d92-bc8cfba8a463\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fecdc3efe4f0230352d90992e44266359ca7129f3654b43186f6e81bbe72ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97pr5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tvsgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.719499 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c8a175-7cc7-4e45-8e37-d431994e5526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-
apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T00:07:26Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1126 00:07:11.241353 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 00:07:11.243535 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-775517230/tls.crt::/tmp/serving-cert-775517230/tls.key\\\\\\\"\\\\nI1126 00:07:26.715442 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 00:07:26.718387 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 00:07:26.718433 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 00:07:26.718455 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 00:07:26.718462 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 00:07:26.727473 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 00:07:26.727505 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727510 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727513 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 00:07:26.727516 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 00:07:26.727520 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 00:07:26.727524 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 00:07:26.727732 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 00:07:26.730962 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not 
found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.747886 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.747927 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.747939 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.747956 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.747968 4875 setters.go:603] "Node became not ready" node="crc" 
condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:37Z","lastTransitionTime":"2025-11-26T00:08:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.765350 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.780444 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.790476 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jhkp7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b640f09-f96d-4197-97be-f50e211e484d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon 
kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wmjfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wmjfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:41Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jhkp7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.801355 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6e20b72-5757-4560-ab3e-61ad3a451be8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b34767b95b984666c42ed51106c7d43766e36ce3f5f4e743d8a40451536f82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dc301396f467d1c7d0af549fa7bf7daf1542a2aa87bf3ece820ce8e21a933d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9641d3d437fd7125bbf21d140d2e6853aed997e53f3f6be8133ce94c0d4a4653\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5086422d5107d7bba0da4b6f9b3e75966b3ee5833d93e8f5baa87e59f99e4578\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manage
r-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:37Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.850115 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.850150 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.850158 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.850172 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.850197 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:37Z","lastTransitionTime":"2025-11-26T00:08:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.952722 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.952792 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.952834 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.952857 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:37 crc kubenswrapper[4875]: I1126 00:08:37.952872 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:37Z","lastTransitionTime":"2025-11-26T00:08:37Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.054911 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.054947 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.054955 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.054968 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.054980 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:38Z","lastTransitionTime":"2025-11-26T00:08:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.157170 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.157208 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.157220 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.157237 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.157249 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:38Z","lastTransitionTime":"2025-11-26T00:08:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.259242 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.259335 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.259353 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.259375 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.259390 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:38Z","lastTransitionTime":"2025-11-26T00:08:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.361757 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.361800 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.361812 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.361829 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.361841 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:38Z","lastTransitionTime":"2025-11-26T00:08:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.464071 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.464178 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.464193 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.464210 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.464223 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:38Z","lastTransitionTime":"2025-11-26T00:08:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.528238 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:08:38 crc kubenswrapper[4875]: E1126 00:08:38.528644 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.566957 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.567012 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.567029 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.567051 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.567066 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:38Z","lastTransitionTime":"2025-11-26T00:08:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.669075 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.669122 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.669134 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.669155 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.669166 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:38Z","lastTransitionTime":"2025-11-26T00:08:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.771934 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.771973 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.771982 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.772011 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.772021 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:38Z","lastTransitionTime":"2025-11-26T00:08:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.874109 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.874151 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.874163 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.874179 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.874191 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:38Z","lastTransitionTime":"2025-11-26T00:08:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.976489 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.976598 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.976613 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.976629 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:38 crc kubenswrapper[4875]: I1126 00:08:38.976651 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:38Z","lastTransitionTime":"2025-11-26T00:08:38Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.079371 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.079433 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.079446 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.079463 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.079474 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:39Z","lastTransitionTime":"2025-11-26T00:08:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.182007 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.182052 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.182070 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.182088 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.182100 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:39Z","lastTransitionTime":"2025-11-26T00:08:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.283946 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.283988 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.284018 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.284037 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.284046 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:39Z","lastTransitionTime":"2025-11-26T00:08:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.386070 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.386116 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.386124 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.386140 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.386151 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:39Z","lastTransitionTime":"2025-11-26T00:08:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.489346 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.489466 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.489482 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.489502 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.489517 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:39Z","lastTransitionTime":"2025-11-26T00:08:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.528536 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.528573 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.528536 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:08:39 crc kubenswrapper[4875]: E1126 00:08:39.528678 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:08:39 crc kubenswrapper[4875]: E1126 00:08:39.528781 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:08:39 crc kubenswrapper[4875]: E1126 00:08:39.528866 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.592479 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.592512 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.592522 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.592538 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.592549 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:39Z","lastTransitionTime":"2025-11-26T00:08:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.694670 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.694757 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.694778 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.694810 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.694831 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:39Z","lastTransitionTime":"2025-11-26T00:08:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.797788 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.797878 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.797901 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.797935 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.797957 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:39Z","lastTransitionTime":"2025-11-26T00:08:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.901478 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.901533 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.901582 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.901598 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:39 crc kubenswrapper[4875]: I1126 00:08:39.901608 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:39Z","lastTransitionTime":"2025-11-26T00:08:39Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.004056 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.004149 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.004174 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.004208 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.004234 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:40Z","lastTransitionTime":"2025-11-26T00:08:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.108404 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.108555 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.108580 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.108616 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.108640 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:40Z","lastTransitionTime":"2025-11-26T00:08:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.212073 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.212118 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.212132 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.212149 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.212163 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:40Z","lastTransitionTime":"2025-11-26T00:08:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.316017 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.316081 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.316095 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.316147 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.316164 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:40Z","lastTransitionTime":"2025-11-26T00:08:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.419732 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.419783 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.419797 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.419815 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.419830 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:40Z","lastTransitionTime":"2025-11-26T00:08:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.524129 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.524203 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.524215 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.524234 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.524248 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:40Z","lastTransitionTime":"2025-11-26T00:08:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.528553 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:08:40 crc kubenswrapper[4875]: E1126 00:08:40.528858 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.628366 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.628453 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.628465 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.628489 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.628511 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:40Z","lastTransitionTime":"2025-11-26T00:08:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.732324 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.732384 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.732393 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.732438 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.732451 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:40Z","lastTransitionTime":"2025-11-26T00:08:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.836080 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.836156 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.836192 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.836213 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.836235 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:40Z","lastTransitionTime":"2025-11-26T00:08:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.939464 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.939528 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.939542 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.939569 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:40 crc kubenswrapper[4875]: I1126 00:08:40.939585 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:40Z","lastTransitionTime":"2025-11-26T00:08:40Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.042993 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.043048 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.043066 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.043094 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.043118 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:41Z","lastTransitionTime":"2025-11-26T00:08:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.145866 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.145925 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.145937 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.145955 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.145965 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:41Z","lastTransitionTime":"2025-11-26T00:08:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.249002 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.249074 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.249091 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.249118 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.249138 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:41Z","lastTransitionTime":"2025-11-26T00:08:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.351665 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.351751 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.351765 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.351789 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.351822 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:41Z","lastTransitionTime":"2025-11-26T00:08:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.454853 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.454902 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.454911 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.454927 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.454939 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:41Z","lastTransitionTime":"2025-11-26T00:08:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.528483 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.528641 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:08:41 crc kubenswrapper[4875]: E1126 00:08:41.528808 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.528879 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:08:41 crc kubenswrapper[4875]: E1126 00:08:41.529115 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:08:41 crc kubenswrapper[4875]: E1126 00:08:41.529169 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.529923 4875 scope.go:117] "RemoveContainer" containerID="2e1e37ff34e8016d35de4328475cea8cef96f21dc241d6ab0dfa8c068c38059d" Nov 26 00:08:41 crc kubenswrapper[4875]: E1126 00:08:41.530102 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-hh8lx_openshift-ovn-kubernetes(8fe08e0b-f339-4beb-953e-6e0180c6c945)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.557264 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.557301 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.557316 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.557330 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.557341 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:41Z","lastTransitionTime":"2025-11-26T00:08:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.660505 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.660568 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.660581 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.660602 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.660619 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:41Z","lastTransitionTime":"2025-11-26T00:08:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.765572 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.765677 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.765702 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.765779 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.765847 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:41Z","lastTransitionTime":"2025-11-26T00:08:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.869861 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.869917 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.869927 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.869947 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.869962 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:41Z","lastTransitionTime":"2025-11-26T00:08:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.972866 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.972951 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.972971 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.973003 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:41 crc kubenswrapper[4875]: I1126 00:08:41.973027 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:41Z","lastTransitionTime":"2025-11-26T00:08:41Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.076386 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.076491 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.076511 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.076540 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.076559 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:42Z","lastTransitionTime":"2025-11-26T00:08:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.178983 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.179027 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.179036 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.179051 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.179060 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:42Z","lastTransitionTime":"2025-11-26T00:08:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.281223 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.281266 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.281280 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.281296 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.281307 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:42Z","lastTransitionTime":"2025-11-26T00:08:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.384373 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.384460 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.384471 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.384492 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.384504 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:42Z","lastTransitionTime":"2025-11-26T00:08:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.486962 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.487010 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.487019 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.487036 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.487046 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:42Z","lastTransitionTime":"2025-11-26T00:08:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.528859 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:08:42 crc kubenswrapper[4875]: E1126 00:08:42.529003 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.589534 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.589566 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.589575 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.589598 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.589610 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:42Z","lastTransitionTime":"2025-11-26T00:08:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.692304 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.692350 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.692359 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.692375 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.692388 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:42Z","lastTransitionTime":"2025-11-26T00:08:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.794655 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.794688 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.794696 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.794711 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.794719 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:42Z","lastTransitionTime":"2025-11-26T00:08:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.897217 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.897258 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.897266 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.897281 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:42 crc kubenswrapper[4875]: I1126 00:08:42.897291 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:42Z","lastTransitionTime":"2025-11-26T00:08:42Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.000399 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.000496 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.000509 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.000529 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.000553 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:43Z","lastTransitionTime":"2025-11-26T00:08:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.103339 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.103716 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.103730 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.103747 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.103760 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:43Z","lastTransitionTime":"2025-11-26T00:08:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.206708 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.206766 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.206778 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.206797 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.206809 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:43Z","lastTransitionTime":"2025-11-26T00:08:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.309879 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.309925 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.309934 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.309949 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.309959 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:43Z","lastTransitionTime":"2025-11-26T00:08:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.412747 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.412815 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.412831 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.412856 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.412871 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:43Z","lastTransitionTime":"2025-11-26T00:08:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.515115 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.515156 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.515166 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.515180 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.515189 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:43Z","lastTransitionTime":"2025-11-26T00:08:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.528685 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.528727 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:08:43 crc kubenswrapper[4875]: E1126 00:08:43.528828 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.528857 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:08:43 crc kubenswrapper[4875]: E1126 00:08:43.529045 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:08:43 crc kubenswrapper[4875]: E1126 00:08:43.529131 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.617279 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.617314 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.617322 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.617334 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.617343 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:43Z","lastTransitionTime":"2025-11-26T00:08:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.719488 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.719530 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.719540 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.719554 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.719568 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:43Z","lastTransitionTime":"2025-11-26T00:08:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.821635 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.821683 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.821696 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.821723 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.821735 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:43Z","lastTransitionTime":"2025-11-26T00:08:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.923841 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.923885 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.923894 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.923909 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:43 crc kubenswrapper[4875]: I1126 00:08:43.923922 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:43Z","lastTransitionTime":"2025-11-26T00:08:43Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.026343 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.026382 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.026393 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.026426 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.026438 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:44Z","lastTransitionTime":"2025-11-26T00:08:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.129661 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.129737 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.129764 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.129794 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.129820 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:44Z","lastTransitionTime":"2025-11-26T00:08:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.232448 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.232481 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.232490 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.232503 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.232511 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:44Z","lastTransitionTime":"2025-11-26T00:08:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.334657 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.334708 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.334719 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.334733 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.334746 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:44Z","lastTransitionTime":"2025-11-26T00:08:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.436885 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.436933 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.436942 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.436958 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.436970 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:44Z","lastTransitionTime":"2025-11-26T00:08:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.528809 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:08:44 crc kubenswrapper[4875]: E1126 00:08:44.529010 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.538644 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.538689 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.538698 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.538713 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.538721 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:44Z","lastTransitionTime":"2025-11-26T00:08:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.641603 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.641672 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.641694 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.641724 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.641741 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:44Z","lastTransitionTime":"2025-11-26T00:08:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.744955 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.745002 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.745014 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.745030 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.745041 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:44Z","lastTransitionTime":"2025-11-26T00:08:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.847845 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.847897 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.847910 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.847926 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.847939 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:44Z","lastTransitionTime":"2025-11-26T00:08:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.951154 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.951215 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.951237 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.951266 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:44 crc kubenswrapper[4875]: I1126 00:08:44.951287 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:44Z","lastTransitionTime":"2025-11-26T00:08:44Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.053815 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.053874 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.053893 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.053916 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.053932 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:45Z","lastTransitionTime":"2025-11-26T00:08:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.156072 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.156117 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.156128 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.156143 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.156152 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:45Z","lastTransitionTime":"2025-11-26T00:08:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.258851 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.258906 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.258916 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.258929 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.258938 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:45Z","lastTransitionTime":"2025-11-26T00:08:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.361554 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.361618 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.361640 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.361685 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.361715 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:45Z","lastTransitionTime":"2025-11-26T00:08:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.464061 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.464109 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.464127 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.464149 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.464166 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:45Z","lastTransitionTime":"2025-11-26T00:08:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.529029 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.529037 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.529157 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:08:45 crc kubenswrapper[4875]: E1126 00:08:45.529238 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:08:45 crc kubenswrapper[4875]: E1126 00:08:45.529480 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:08:45 crc kubenswrapper[4875]: E1126 00:08:45.529605 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.541776 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3b640f09-f96d-4197-97be-f50e211e484d-metrics-certs\") pod \"network-metrics-daemon-jhkp7\" (UID: \"3b640f09-f96d-4197-97be-f50e211e484d\") " pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:08:45 crc kubenswrapper[4875]: E1126 00:08:45.542748 4875 secret.go:188] Couldn't get secret openshift-multus/metrics-daemon-secret: object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 00:08:45 crc kubenswrapper[4875]: E1126 00:08:45.542928 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/3b640f09-f96d-4197-97be-f50e211e484d-metrics-certs podName:3b640f09-f96d-4197-97be-f50e211e484d nodeName:}" failed. No retries permitted until 2025-11-26 00:09:49.542897358 +0000 UTC m=+162.732873093 (durationBeforeRetry 1m4s). 
Error: MountVolume.SetUp failed for volume "metrics-certs" (UniqueName: "kubernetes.io/secret/3b640f09-f96d-4197-97be-f50e211e484d-metrics-certs") pod "network-metrics-daemon-jhkp7" (UID: "3b640f09-f96d-4197-97be-f50e211e484d") : object "openshift-multus"/"metrics-daemon-secret" not registered Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.566340 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.566379 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.566392 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.566429 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.566444 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:45Z","lastTransitionTime":"2025-11-26T00:08:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.668206 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.668241 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.668252 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.668268 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.668280 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:45Z","lastTransitionTime":"2025-11-26T00:08:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.758997 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.759029 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.759038 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.759054 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.759065 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:45Z","lastTransitionTime":"2025-11-26T00:08:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:45 crc kubenswrapper[4875]: E1126 00:08:45.770127 4875 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e98cdd5c-4f13-4403-b492-aac02592afb8\\\",\\\"systemUUID\\\":\\\"9f6d4e71-289a-40d7-bad1-b955dbea75de\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:45Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.774104 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.774134 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.774142 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.774156 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.774164 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:45Z","lastTransitionTime":"2025-11-26T00:08:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:45 crc kubenswrapper[4875]: E1126 00:08:45.789553 4875 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e98cdd5c-4f13-4403-b492-aac02592afb8\\\",\\\"systemUUID\\\":\\\"9f6d4e71-289a-40d7-bad1-b955dbea75de\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:45Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.792911 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.792960 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.793011 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.793033 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.793049 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:45Z","lastTransitionTime":"2025-11-26T00:08:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:45 crc kubenswrapper[4875]: E1126 00:08:45.807572 4875 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e98cdd5c-4f13-4403-b492-aac02592afb8\\\",\\\"systemUUID\\\":\\\"9f6d4e71-289a-40d7-bad1-b955dbea75de\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:45Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.811755 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.811791 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.811801 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.811816 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.811826 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:45Z","lastTransitionTime":"2025-11-26T00:08:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:45 crc kubenswrapper[4875]: E1126 00:08:45.827133 4875 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e98cdd5c-4f13-4403-b492-aac02592afb8\\\",\\\"systemUUID\\\":\\\"9f6d4e71-289a-40d7-bad1-b955dbea75de\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:45Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.831345 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.831379 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasNoDiskPressure" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.831387 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.831400 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.831412 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:45Z","lastTransitionTime":"2025-11-26T00:08:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:45 crc kubenswrapper[4875]: E1126 00:08:45.844798 4875 kubelet_node_status.go:585] "Error updating node status, will retry" err="failed to patch status \"{\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"type\\\":\\\"DiskPressure\\\"},{\\\"type\\\":\\\"PIDPressure\\\"},{\\\"type\\\":\\\"Ready\\\"}],\\\"allocatable\\\":{\\\"cpu\\\":\\\"11800m\\\",\\\"ephemeral-storage\\\":\\\"76396645454\\\",\\\"memory\\\":\\\"32404560Ki\\\"},\\\"capacity\\\":{\\\"cpu\\\":\\\"12\\\",\\\"ephemeral-storage\\\":\\\"83293888Ki\\\",\\\"memory\\\":\\\"32865360Ki\\\"},\\\"conditions\\\":[{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient memory available\\\",\\\"reason\\\":\\\"KubeletHasSufficientMemory\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"MemoryPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:45Z\\\",\\\"message\\\":\\\"kubelet has no disk pressure\\\",\\\"reason\\\":\\\"KubeletHasNoDiskPressure\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"DiskPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:45Z\\\",\\\"message\\\":\\\"kubelet has sufficient PID available\\\",\\\"reason\\\":\\\"KubeletHasSufficientPID\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PIDPressure\\\"},{\\\"lastHeartbeatTime\\\":\\\"2025-11-26T00:08:45Z\\\",\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:45Z\\\",\\\"message\\\":\\\"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?\\\",\\\"reason\\\":\\\"KubeletNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"}],\\\"images\\\":[{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b9ea248f8ca33258fe1683da51d2b16b94630be1b361c65f68a16c1a34b94887\\\"],\\\"sizeBytes\\\":2887430265},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:4a62fa1c0091f6d94e8fb7258470b9a532d78364b6b51a05341592041d598562\\\",\\\"registry.redhat.io/redhat/redhat-operator-index@sha256:8db792bab418e30d9b71b9e1ac330ad036025257abbd2cd32f318ed14f70d6ac\\\",\\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1523204510},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\"],\\\"sizeBytes\\\":1498102846},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\"],\\\"sizeBytes\\\":1232839934},{\\\"names\\\":[\\\"registry.redhat.io/redhat/community-operator-index@sha256:8ff55cdb2367f5011074d2f5ebdc153b8885e7495e14ae00f99d2b7ab3584ade\\\",\\\"registry.redhat.io/redhat/community-operator-index@sha256:d656c1453f2261d9b800f5c69fba3bc2ffdb388414c4c0e89fcbaa067d7614c4\\\",\\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1151049424},{\\\"names\\\":[\\\"registry.redhat.io/redhat/certified-operator-index@sha256:1d7d4739b2001bd173f2632d5f73724a5034237ee2d93a02a21bbfff547002ba\\\",\\\"registry.redhat.io/redhat/certified-operator-index@sha256:7688bce5eb0d153adff87fc9f7a47642465c0b88208efb236880197969931b37\\\",\\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"],\\\"sizeBytes\\\":1032059094},{\\\"names\\\":[\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:0878ac12c537fcfc617a539b3b8bd329ba568bb49c6e3bb47827b177c47ae669\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index@sha256:1dc15c170ebf462dacaef75511740ed94ca1da210f3980f66d77f91ba201c875\\\",\\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"],\\\"sizeBytes\\\":1001152198},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\"],\\\"sizeBytes\\\":964552795},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\"],\\\"sizeBytes\\\":947616130},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c3cc3840d7a81ce1b420f06e07a923861faf37d9c10688aa3aa0b7b76c8706ad\\\"],\\\"sizeBytes\\\":907837715},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:101f295e2eae0755ae1865f7de885db1f17b9368e4120a713bb5f79e17ce8f93\\\"],\\\"sizeBytes\\\":854694423},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:47b0670fa1051335fd2d2c9e8361e4ed77c7760c33a2180b136f7c7f59863ec2\\\"],\\\"sizeBytes\\\":852490370},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:862f4a4bed52f372056b6d368e2498ebfb063075b31cf48dbdaaeedfcf0396cb\\\"],\\\"sizeBytes\\\":772592048},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\"],\\\"sizeBytes\\\":705793115},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\"],\\\"sizeBytes\\\":687915987},{\\\"names
\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f247257b0885cf5d303e3612c7714b33ae51404cfa2429822060c6c025eb17dd\\\"],\\\"sizeBytes\\\":668060419},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\"],\\\"sizeBytes\\\":613826183},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e3e9dc0b02b9351edf7c46b1d46d724abd1ac38ecbd6bc541cee84a209258d8\\\"],\\\"sizeBytes\\\":581863411},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\"],\\\"sizeBytes\\\":574606365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ee8d8f089ec1488067444c7e276c4e47cc93840280f3b3295484d67af2232002\\\"],\\\"sizeBytes\\\":550676059},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:10f20a39f16ae3019c62261eda8beb9e4d8c36cbb7b500b3bae1312987f0685d\\\"],\\\"sizeBytes\\\":541458174},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\"],\\\"sizeBytes\\\":533092226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\"],\\\"sizeBytes\\\":528023732},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\"],\\\"sizeBytes\\\":510867594},{\\\"names\\\":[\\\"quay.io/crcont/ocp-release@sha256:0b6ae0d091d2bf49f9b3a3aff54aabdc49e70c783780f118789f49d8f95a9e03\\\"],\\\"sizeBytes\\\":510526836},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\"],\\\"sizeBytes\\\":507459597},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7e9e7dd2b1a8394b7490ca6df8a3ee8cdfc6193ecc6fb6173ed9a1868116a207\\\"],\\\"sizeBytes\\\":505721947},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:094bb6a6641b4edbaf932f0551bcda20b0d4e012cbe84207348b24eeabd351e9\\\"],\\\"sizeBytes\\\":504778226},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c69fe7a98a744b7a7b61b2a8db81a338f373cd2b1d46c6d3f02864b30c37e46c\\\"],\\\"sizeBytes\\\":504735878},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e51e6f78ec20ef91c82e94a49f950e427e77894e582dcc406eec4df807ddd76e\\\"],\\\"sizeBytes\\\":502943148},{\\\"names\\\":[\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\"],\\\"sizeBytes\\\":501379880},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:3a741253807c962189819d879b8fef94a9452fb3f5f3969ec3207eb2d9862205\\\"],\\\"sizeBytes\\\":500472212},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\"],\\\"sizeBytes\\\":498888951},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5aa9e5379bfeb63f4e517fb45168eb6820138041641bbdfc6f4db6427032fa37\\\"],\\\"sizeBytes\\\":497832828},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\"],\\\"sizeBytes\\\":497742284},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:88b1f0a05a1b1c
91e1212b40f0e7d04c9351ec9d34c52097bfdc5897b46f2f0e\\\"],\\\"sizeBytes\\\":497120598},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:737e9019a072c74321e0a909ca95481f5c545044dd4f151a34d0e1c8b9cf273f\\\"],\\\"sizeBytes\\\":488494681},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:fe009d03910e18795e3bd60a3fd84938311d464d2730a2af5ded5b24e4d05a6b\\\"],\\\"sizeBytes\\\":487097366},{\\\"names\\\":[\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:66760a53b64d381940757ca9f0d05f523a61f943f8da03ce9791e5d05264a736\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner@sha256:e97a0cb5b6119a9735efe0ac24630a8912fcad89a1dddfa76dc10edac4ec9815\\\",\\\"registry.redhat.io/openshift4/ose-csi-external-provisioner:latest\\\"],\\\"sizeBytes\\\":485998616},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\"],\\\"sizeBytes\\\":485767738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:898cae57123c5006d397b24af21b0f24a0c42c9b0be5ee8251e1824711f65820\\\"],\\\"sizeBytes\\\":485535312},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:1eda5ad6a6c5b9cd94b4b456e9116f4a0517241b614de1a99df14baee20c3e6a\\\"],\\\"sizeBytes\\\":479585218},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:487c0a8d5200bcdce484ab1169229d8fcb8e91a934be45afff7819c4f7612f57\\\"],\\\"sizeBytes\\\":476681373},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:b641ed0d63034b23d07eb0b2cd455390e83b186e77375e2d3f37633c1ddb0495\\\"],\\\"sizeBytes\\\":473958144},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:32f9e10dfb8a7c812ea8b3e71a42bed9cef05305be18cc368b666df4643ba717\\\"],\\\"sizeBytes\\\":463179365},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:8fdf28927b06a42ea8af3985d558c84d9efd142bb32d3892c4fa9f5e0d98133c\\\"],\\\"sizeBytes\\\":460774792},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:dd0628f89ad843d82d5abfdc543ffab6a861a23cc3005909bd88fa7383b71113\\\"],\\\"sizeBytes\\\":459737917},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\"],\\\"sizeBytes\\\":457588564},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:adabc3456bf4f799f893d792cdf9e8cbc735b070be346552bcc99f741b0a83aa\\\"],\\\"sizeBytes\\\":450637738},{\\\"names\\\":[\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:342dca43b5b09123737ccda5e41b4a5d564e54333d8ce04d867d3fb968600317\\\"],\\\"sizeBytes\\\":448887027}],\\\"nodeInfo\\\":{\\\"bootID\\\":\\\"e98cdd5c-4f13-4403-b492-aac02592afb8\\\",\\\"systemUUID\\\":\\\"9f6d4e71-289a-40d7-bad1-b955dbea75de\\\"}}}\" for node \"crc\": Internal error occurred: failed calling webhook \"node.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/node?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:45Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:45 crc kubenswrapper[4875]: E1126 00:08:45.844958 4875 kubelet_node_status.go:572] "Unable to update node status" err="update node status exceeds retry count" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.846502 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" 
event="NodeHasSufficientMemory" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.846550 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.846567 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.846593 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.846609 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:45Z","lastTransitionTime":"2025-11-26T00:08:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.949201 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.949236 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.949245 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.949260 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:45 crc kubenswrapper[4875]: I1126 00:08:45.949270 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:45Z","lastTransitionTime":"2025-11-26T00:08:45Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.050948 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.051004 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.051023 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.051047 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.051064 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:46Z","lastTransitionTime":"2025-11-26T00:08:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.153786 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.153825 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.153834 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.153849 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.153861 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:46Z","lastTransitionTime":"2025-11-26T00:08:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.255869 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.255917 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.255928 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.255944 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.255957 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:46Z","lastTransitionTime":"2025-11-26T00:08:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.358292 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.358373 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.358384 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.358402 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.358431 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:46Z","lastTransitionTime":"2025-11-26T00:08:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.460653 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.460694 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.460703 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.460719 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.460730 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:46Z","lastTransitionTime":"2025-11-26T00:08:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.528358 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:08:46 crc kubenswrapper[4875]: E1126 00:08:46.528546 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.563290 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.563342 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.563357 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.563378 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.563399 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:46Z","lastTransitionTime":"2025-11-26T00:08:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.666424 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.666501 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.666536 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.666552 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.666562 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:46Z","lastTransitionTime":"2025-11-26T00:08:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.769221 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.769261 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.769272 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.769288 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.769299 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:46Z","lastTransitionTime":"2025-11-26T00:08:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.872149 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.872220 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.872233 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.872254 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.872266 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:46Z","lastTransitionTime":"2025-11-26T00:08:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.982000 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.982125 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.982143 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.982169 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:46 crc kubenswrapper[4875]: I1126 00:08:46.982187 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:46Z","lastTransitionTime":"2025-11-26T00:08:46Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.085936 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.086017 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.086035 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.086063 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.086083 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:47Z","lastTransitionTime":"2025-11-26T00:08:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.190891 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.190958 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.190976 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.191007 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.191043 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:47Z","lastTransitionTime":"2025-11-26T00:08:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.294094 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.294142 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.294153 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.294176 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.294194 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:47Z","lastTransitionTime":"2025-11-26T00:08:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.397484 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.397551 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.397569 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.397592 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.397606 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:47Z","lastTransitionTime":"2025-11-26T00:08:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.501076 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.501145 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.501183 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.501218 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.501244 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:47Z","lastTransitionTime":"2025-11-26T00:08:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.529000 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.529070 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.529216 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:08:47 crc kubenswrapper[4875]: E1126 00:08:47.529212 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:08:47 crc kubenswrapper[4875]: E1126 00:08:47.529338 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:08:47 crc kubenswrapper[4875]: E1126 00:08:47.529452 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.552786 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"f6e20b72-5757-4560-ab3e-61ad3a451be8\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://08b34767b95b984666c42ed51106c7d43766e36ce3f5f4e743d8a40451536f82\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c0f9da410c07372b6c9ad6a79379b491cd10fdee88051c026b084652d85aed21\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cluster-policy-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://0dc301396f467d1c7d0af549fa7bf7daf1542a2aa87bf3ece820ce8e21a933d5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://9641d3d437fd7125bbf21d140d2e6853aed997e53f3f6be8133ce94c0d4a4653\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/st
atic-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://5086422d5107d7bba0da4b6f9b3e75966b3ee5833d93e8f5baa87e59f99e4578\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-controller-manager-operator@sha256:8506ce0a578bc18fac117eb2b82799488ffac0bed08287faaf92edaf5d17ab95\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-controller-manager-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-controller-manager\"/\"kube-controller-manager-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:47Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.572197 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"7c3902c5-28fd-457b-bbf8-e12ea98ef45f\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://dd927fcd02e9f4b19ca442007a070dbc4ec8b26fe5e3dbd6e65f171616aebddc\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-crio\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kube\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://79de50f8565ebaa0603152ebfb03022b2bd21faba4ad3fb999735de1a2249a1b\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://79de50f8565ebaa0603152ebfb03022b2bd21faba4ad3fb999735de1a2249a1b\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var\\\",\\\"name\\\":\\\"var-lib-kubelet\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"kube-rbac-proxy-crio-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:47Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.593975 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b6479f0-333b-4a96-9adf-2099afdc2447\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [network-check-target-container]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"network-check-target-container\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-cqllr\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-target-xd92c\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:47Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.604025 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.604075 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.604093 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.604111 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.604155 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:47Z","lastTransitionTime":"2025-11-26T00:08:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.615453 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [networking-console-plugin]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:ae647598ec35cda5766806d3d44a91e3b9d4dee48ff154f3d8490165399873fd\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"networking-console-plugin\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/cert\\\",\\\"name\\\":\\\"networking-console-plugin-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/nginx/nginx.conf\\\",\\\"name\\\":\\\"nginx-conf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-console\"/\"networking-console-plugin-85b44fc459-gdk6g\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:47Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.634547 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/network-metrics-daemon-jhkp7" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"3b640f09-f96d-4197-97be-f50e211e484d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"message\\\":\\\"containers with unready 
status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"message\\\":\\\"containers with unready status: [network-metrics-daemon kube-rbac-proxy]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/metrics\\\",\\\"name\\\":\\\"metrics-certs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wmjfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:d98bb346a17feae024d92663df92b25c120938395ab7043afbed543c6db9ca8d\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-metrics-daemon\\\",\\\"ready\\\":false,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-wmjfj\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:41Z\\\"}}\" for pod \"openshift-multus\"/\"network-metrics-daemon-jhkp7\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:47Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.669217 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-etcd/etcd-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"c771ff97-2ac6-43b4-976e-06fb4909c83b\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:11Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://2a0c30f49af86d8affd5039728fe2adf22324b93d617dd5c63dfc73aed0ed70e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7f839a108e04ab5f68757b2f5661fe0643d88bbd33493cc800d9c6826d0e69c4\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://7ef02df42efda33d3f123cfe925bb0d1adab00fecbfd8711962c4bdf458898e1\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd/\\\",\\\"name\\\":\\\"log-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://c7650f30df5f9f908d3dc068e88e04892dce7a8
8f63ecc6a52901a36ba7bed5f\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:a0fa3723269019bee1847b26702f42928e779036cc2f58408f8ee7866be30a93\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-rev\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:12Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/lib/etcd\\\",\\\"name\\\":\\\"data-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://aac38d8e30fa7e53d316dd4092f7210c7976bd44f77d2a21ab0ed53e1f6c69f3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcdctl\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:11Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/manifests\\\",\\\"name\\\":\\\"static-pod-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/lib/etcd/\\\",\\\"name\\\":\\\"data-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://7fd7909056f1735035ecf413ce9b2b3c58916e37043ee39f7764dfa9520b1e49\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/etcd\\\",\\\"name\\\":\\\"log-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-ensure-env-vars\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://cac58bb1db4dd92b2e554a254404f1911640855a08a08fc44e3677930f083ac6\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}}},{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\"
,\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:07b7c6877441ecd6a5646fb68e33e9be8b90092272e49117b54b4a67314731ca\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"etcd-resources-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://c2328281e583b11b4496986b2fc709ecbe1f5b30eba15ffdd98530f266307f4f\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:10Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/usr/local/bin\\\",\\\"name\\\":\\\"usr-local-bin\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-etcd\"/\"etcd-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:47Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.695667 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/network-operator-58b4c7f79c-55gtf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"37a5e44f-9a88-4405-be8a-b645485e7312\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://e0dde34d63f530305d33e43053c6475f1e1a5a794722700a85758a4d3d3ef5ae\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"network-operator\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"host-etc-kube\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/serving-cert\\\",\\\"name\\\":\\\"metrics-tls\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rdwmf\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"network-operator-58b4c7f79c-55gtf\": Internal error 
occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:47Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.706706 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.706760 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.706774 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.706793 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.706804 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:47Z","lastTransitionTime":"2025-11-26T00:08:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.711064 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-operator/iptables-alerter-4ln5h" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"d75a4c96-2883-4a0b-bab2-0fab2b6c0b49\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://5231f747090cf07f92f8f56d3c43191b581098758208e6901b28de04113ab0c5\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"iptables-alerter\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/iptables-alerter\\\",\\\"name\\\":\\\"iptables-alerter-script\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-rczfb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-operator\"/\"iptables-alerter-4ln5h\": Internal error occurred: failed calling webhook 
\"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:47Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.728072 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-node-identity/network-node-identity-vrzqb" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"ef543e1b-8068-4ea3-b32a-61027b32e95d\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://a19f36bfb94e5e0000f6e930c5160e799d4a3e334d2b119dcc6879fac1a4a98e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"approver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b9dfa9b8751b0d639d299539f76b3a57c46b730d3960851344162019cb4843f0\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"webhook\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/webhook-cert/\\\",\\\"name\\\":\\\"webhook-cert\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/ovnkube-identity-config\\\",\\\"name\\\":\\\"ovnkube-identity-cm\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2kz5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}]}}\" for pod \"openshift-network-node-identity\"/\"network-node-identity-vrzqb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:47Z is after 
2025-08-24T17:21:41Z" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.744329 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-dns/node-resolver-hjshz" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"5e5a9914-f410-4b51-8e16-77e18f7b09fb\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6a43439ddb6f096b37261ab045b6a3ed923eefecb9c3122b00a87517605d0aaf\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:35512335ac39aed0f55b7f799f416f4f6445c20c1b19888cf2bb72bb276703f2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"dns-node-resolver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/hosts\\\",\\\"name\\\":\\\"hosts-file\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mm8dm\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-dns\"/\"node-resolver-hjshz\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:47Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.758499 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-multus/multus-8wvqp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"9fd65949-6a8a-4828-baa4-1c88c71d5ed2\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:16Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://8403934b57b62597733ce8e578eef53784b1e5a39eba7e4fb8765870a3690424\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://e107494e5b30ef73730bc20a4b4ae86c83caf1590f6a8044d1892e424e1559a8\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T00:08:15Z\\\",\\\"message\\\":\\\"2025-11-26T00:07:29+00:00 [cnibincopy] Successfully copied files in /usr/src/multus-cni/rhel9/bin/ to /host/opt/cni/bin/upgrade_1493b82f-90d3-4212-862b-1aab5d292ac2\\\\n2025-11-26T00:07:30+00:00 [cnibincopy] Successfully moved files in /host/opt/cni/bin/upgrade_1493b82f-90d3-4212-862b-1aab5d292ac2 to /host/opt/cni/bin/\\\\n2025-11-26T00:07:30Z [verbose] multus-daemon started\\\\n2025-11-26T00:07:30Z [verbose] Readiness Indicator file check\\\\n2025-11-26T00:08:15Z [error] have you checked that your default network is ready? still waiting for readinessindicatorfile @ /host/run/multus/cni/net.d/10-ovn-kubernetes.conf. 
pollimmediate error: timed out waiting for the condition\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"name\\\":\\\"kube-multus\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:08:16Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/run/multus/cni/net.d\\\",\\\"name\\\":\\\"multus-cni-dir\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/run/multus\\\",\\\"name\\\":\\\"multus-socket-dir-parent\\\"},{\\\"mountPath\\\":\\\"/run/k8s.cni.cncf.io\\\",\\\"name\\\":\\\"host-run-k8s-cni-cncf-io\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/bin\\\",\\\"name\\\":\\\"host-var-lib-cni-bin\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/multus\\\",\\\"name\\\":\\\"host-var-lib-cni-multus\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-var-lib-kubelet\\\"},{\\\"mountPath\\\":\\\"/hostroot\\\",\\\"name\\\":\\\"hostroot\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/net.d\\\",\\\"name\\\":\\\"multus-conf-dir\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d/multus.d\\\",\\\"name\\\":\\\"multus-daemon-config\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/cni/multus/certs\\\",\\\"name\\\":\\\"host-run-multus-certs\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes\\\",\\\"name\\\":\\\"etc-kubernetes\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-22mhs\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-8wvqp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:47Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.771011 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://546875ee0f2f4855eb47134061708606927a379fcc7e1c3492cffcc519bbb542\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/tls/private\\\",\\\"name\\\":\\\"proxy-tls\\\"},{\\\"mountPath\\\":\\\"/etc/kube-rbac-proxy\\\",\\\"name\\\":\\\"mcd-auth-proxy-config\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2cdf274332d9e071cabf1993c37531ebc66300c965dd736a4795252168efc371\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:c915fb8ba96e911699a1ae34a8e95ca8a9fbe1bf8c28fea177225c63a8bdfc0a\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"machine-config-daemon\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/rootfs\\\",\\\"name\\\":\\\"rootfs\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-jt2ft\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:27Z\\\"}}\" for pod \"openshift-machine-config-operator\"/\"machine-config-daemon-9mztb\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:47Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.786438 4875 status_manager.go:875] "Failed to 
update status for pod" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"02732a7d-257e-4d74-8a08-cea35074fc4e\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:35Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:36Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ab59be8e3f5b8bfceb2d6bdb642d03f2049be71e3acd91d7b5dd155e471f7758\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:7eeaee65f2808b819eedb413bdcabb9144e12f0dd97f13fd1afba93a95b67b26\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-multus-additional-cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:35Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e40792096b162f0f9ce5f8362f51e5f8dea2c1ce4b1447235388416b5db7708c\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"egress-router-binary-copy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://4e3b873c5b8250adf9181dfd127b52107798b01c0e890fb253c66cd53ecc5b72\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:28Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2c
c\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:687fddfbb085a1688df312ce4ec8c857df9b2daed8ff4a7ed6163a1154afa2cc\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"cni-plugins\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://026ce63391051d02fdc645ad47f0898805af7dffaa165ada3cb945a29e657317\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:30Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/tuning/\\\",\\\"name\\\":\\\"tuning-conf-dir\\\"},{\\\"mountPath\\\":\\\"/sysctls\\\",\\\"name\\\":\\\"cni-sysctl-allowlist\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:383f4cceeeaead203bb2327fdd367c64b64d729d7fa93089f249e496fcef0c78\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"bond-cni-plugin\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3c4b53ff1c116cd17e31c1596bca8f60aae1b71f65b94174146e1e588b31c556\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:31Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:f567acb85146b5ed81451ec3e79f2de0c62e28c69b2eeade0abdf5d0c388e7aa\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"routeoverride-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://ea12ed5fc29c2aac6c9ef92a2a25fec0c4e70e08af3ede179f5217d74d71b6eb\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:32Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:32Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-re
lease\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni-bincopy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://3ae97afc522626b22f3e17be097dedf5a0de4d858494aec5d3c24ddd9cf3da92\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/entrypoint\\\",\\\"name\\\":\\\"cni-binary-copy\\\"},{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/os-release\\\",\\\"name\\\":\\\"os-release\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:98100674616e54319f6713d742fd0c3bdbc84e6e6173e8ccf4a2473a714c2bc4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"whereabouts-cni\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://9814eca54d57195c625c4b69c341cf90e155117db81492f12ec88a894da9d432\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:34Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:34Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/host/opt/cni/bin\\\",\\\"name\\\":\\\"cnibin\\\"},{\\\"mountPath\\\":\\\"/host/etc/cni/net.d\\\",\\\"name\\\":\\\"system-cni-dir\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-6x47n\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-multus\"/\"multus-additional-cni-plugins-qvrsq\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:47Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.799858 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qhmvp" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"40b82738-34d2-4c2f-99c3-ae1198e964dc\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:40Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:41Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://c52a486d839058df608b79c0f2e01f157347ddefbcefdda59cb44b073c1f776c\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-control-plane-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6mk2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://b83a4af8fd03aab54351d9dc3f4b0f9a203aab768023dd1a416361d746473cdb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovnkube-cluster-manager\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:41Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s6mk2\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:40Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-control-plane-749d76644c-qhmvp\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:47Z is after 2025-08-24T17:21:41Z" Nov 26 
00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.809312 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.809363 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.809376 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.809393 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.809404 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:47Z","lastTransitionTime":"2025-11-26T00:08:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.817049 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"c6c8a175-7cc7-4e45-8e37-d431994e5526\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:37Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"},{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d
7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-regeneration-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738\\\",\\\"exitCode\\\":255,\\\"finishedAt\\\":\\\"2025-11-26T00:07:26Z\\\",\\\"message\\\":\\\"espace): Get \\\\\\\"https://localhost:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\\\\\\\": dial tcp [::1]:6443: connect: connection refused\\\\nI1126 00:07:11.241353 1 builder.go:304] check-endpoints version 4.18.0-202502101302.p0.g763313c.assembly.stream.el9-763313c-763313c860ea43fcfc9b1ac00ebae096b57c078e\\\\nI1126 00:07:11.243535 1 dynamic_serving_content.go:116] \\\\\\\"Loaded a new cert/key pair\\\\\\\" name=\\\\\\\"serving-cert::/tmp/serving-cert-775517230/tls.crt::/tmp/serving-cert-775517230/tls.key\\\\\\\"\\\\nI1126 00:07:26.715442 1 requestheader_controller.go:247] Loaded a new request header values for RequestHeaderAuthRequestController\\\\nI1126 00:07:26.718387 1 maxinflight.go:139] \\\\\\\"Initialized nonMutatingChan\\\\\\\" len=400\\\\nI1126 00:07:26.718433 1 maxinflight.go:145] \\\\\\\"Initialized mutatingChan\\\\\\\" len=200\\\\nI1126 00:07:26.718455 1 maxinflight.go:116] \\\\\\\"Set denominator for readonly requests\\\\\\\" limit=400\\\\nI1126 00:07:26.718462 1 maxinflight.go:120] \\\\\\\"Set denominator for mutating requests\\\\\\\" limit=200\\\\nI1126 00:07:26.727473 1 secure_serving.go:57] Forcing use of http/1.1 only\\\\nW1126 00:07:26.727505 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727510 1 secure_serving.go:69] Use of insecure cipher 'TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256' detected.\\\\nW1126 00:07:26.727513 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_GCM_SHA256' detected.\\\\nW1126 00:07:26.727516 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_GCM_SHA384' detected.\\\\nW1126 00:07:26.727520 1 
secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_128_CBC_SHA' detected.\\\\nW1126 00:07:26.727524 1 secure_serving.go:69] Use of insecure cipher 'TLS_RSA_WITH_AES_256_CBC_SHA' detected.\\\\nI1126 00:07:26.727732 1 genericapiserver.go:533] MuxAndDiscoveryComplete has all endpoints registered and discovery information is complete\\\\nF1126 00:07:26.730962 1 cmd.go:182] pods \\\\\\\"kube-apiserver-crc\\\\\\\" not found\\\\n\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"name\\\":\\\"kube-apiserver-check-endpoints\\\",\\\"ready\\\":true,\\\"restartCount\\\":1,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:27Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec\\\",\\\"image\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"imageID\\\":\\\"quay.io/crcont/openshift-crc-cluster-kube-apiserver-operator@sha256:9f36dc276e27753fc478274c7f7814a4f8945c987117ee1ea3b8e6355e6d7462\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-apiserver-insecure-readyz\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}}}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/log/kube-apiserver\\\",\\\"name\\\":\\\"audit-dir\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-apiserver\"/\"kube-apiserver-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:47Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.830348 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" err="failed to patch status 
\"{\\\"metadata\\\":{\\\"uid\\\":\\\"8d127797-c509-4d81-950c-2e4bc7f82786\\\"},\\\"status\\\":{\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:09Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:08:03Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:07Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodScheduled\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://ec22a9fda336e3f85bcbaf3c8fee2d1096b9f9eaede26d1246858bf393ad5b55\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:09Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://fffb96f456bc7cb372dceab2ad7310da04e2487d43530da595c2b865ec8e02a3\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-cert-syncer\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]},{\\\"containerID\\\":\\\"cri-o://11193f8290474fdb88f54086c6e42c01f4630df442b28819c4917165ef46a5cb\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:5b881c97aa8e440c6b3ca001edfd789a9380066b8f11f35a8dd8d88c5c7dbf86\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-scheduler-recovery-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:10Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-resources\\\",\\\"name\\\":\\\"resource-dir\\\"},{\\\"mountPath\\\":\\\"/etc/kubernetes/static-pod-certs\\\",\\\"name\\\":\\\"cert-dir\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://34b0e5244d5a03f2b673959e7a5d9ec63a0d3a77dc9915cc1e1e0d73cdcfabee\\\",\\\"imag
e\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:06bc35825771aee1220d34720243b89c4ba8a8b335e6de2597126bd791fd90d4\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"wait-for-host-port\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://34b0e5244d5a03f2b673959e7a5d9ec63a0d3a77dc9915cc1e1e0d73cdcfabee\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:08Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:08Z\\\"}}}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:07Z\\\"}}\" for pod \"openshift-kube-scheduler\"/\"openshift-kube-scheduler-crc\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:47Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.845582 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"9d751cbb-f2e2-430d-9754-c882a5e924a5\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:27Z\\\",\\\"message\\\":\\\"containers with unready status: [check-endpoints]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:e1baa38811c04bd8909e01a1f3be7421a1cb99d608d3dc4cf86d95b17de2ab8b\\\",\\\"imageID\\\":\\\"\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"exitCode\\\":137,\\\"finishedAt\\\":null,\\\"message\\\":\\\"The container could not be located when the pod was deleted. 
The container used to be Running\\\",\\\"reason\\\":\\\"ContainerStatusUnknown\\\",\\\"startedAt\\\":null}},\\\"name\\\":\\\"check-endpoints\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"reason\\\":\\\"ContainerCreating\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-s2dwl\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"podIP\\\":null,\\\"podIPs\\\":null}}\" for pod \"openshift-network-diagnostics\"/\"network-check-source-55646444c4-trplf\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:47Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.870645 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"8fe08e0b-f339-4beb-953e-6e0180c6c945\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: [ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:28Z\\\",\\\"message\\\":\\\"containers with unready status: 
[ovnkube-controller]\\\",\\\"reason\\\":\\\"ContainersNotReady\\\",\\\"status\\\":\\\"False\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-node\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:242b3d66438c42745f4ef318bdeaf3d793426f12962a42ea83e18d06c08aaf09\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kube-rbac-proxy-ovn-metrics\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/pki/tls/metrics-cert\\\",\\\"name\\\":\\\"ovn-node-metrics-cert\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"nbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.
0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"northd\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-acl-logging\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"ovn-controller\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn/\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/dev/log\\\",\\\"name\\\":\\\"log-socket\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2e1e37ff34e8016d35de4328475cea8cef96f21d
c241d6ab0dfa8c068c38059d\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://2e1e37ff34e8016d35de4328475cea8cef96f21dc241d6ab0dfa8c068c38059d\\\",\\\"exitCode\\\":1,\\\"finishedAt\\\":\\\"2025-11-26T00:08:26Z\\\",\\\"message\\\":\\\"ssIP event handler 8\\\\nI1126 00:08:26.489054 6866 handler.go:208] Removed *v1.Node event handler 7\\\\nI1126 00:08:26.489070 6866 handler.go:208] Removed *v1.Node event handler 2\\\\nI1126 00:08:26.489032 6866 handler.go:190] Sending *v1.Namespace event handler 5 for removal\\\\nI1126 00:08:26.489082 6866 handler.go:190] Sending *v1.Namespace event handler 1 for removal\\\\nI1126 00:08:26.489101 6866 handler.go:208] Removed *v1.Namespace event handler 5\\\\nI1126 00:08:26.489148 6866 handler.go:190] Sending *v1.NetworkPolicy event handler 4 for removal\\\\nI1126 00:08:26.489202 6866 handler.go:190] Sending *v1.EgressIP event handler 8 for removal\\\\nI1126 00:08:26.489201 6866 handler.go:208] Removed *v1.Namespace event handler 1\\\\nI1126 00:08:26.489235 6866 factory.go:656] Stopping watch factory\\\\nI1126 00:08:26.489259 6866 handler.go:208] Removed *v1.EgressIP event handler 8\\\\nI1126 00:08:26.489294 6866 handler.go:208] Removed *v1.NetworkPolicy event handler 4\\\\nI1126 00:08:26.489847 6866 factory.go:1336] Added *v1.EgressFirewall event handler 9\\\\nI1126 00:08:26.490022 6866 controller.go:132] Adding controller ef_node_controller event handlers\\\\nI1126 00:08:26.490104 6866 ovnkube.go:599] Stopped ovnkube\\\\nI1126 00:08:26.490210 6866 metrics.go:553] Stopping metrics server at address \\\\\\\"127.0.0.1:29103\\\\\\\"\\\\nF1126 00:08:26.490449 6866 ovnkube.go:\\\",\\\"reason\\\":\\\"Error\\\",\\\"startedAt\\\":\\\"2025-11-26T00:08:25Z\\\"}},\\\"name\\\":\\\"ovnkube-controller\\\",\\\"ready\\\":false,\\\"restartCount\\\":3,\\\"started\\\":false,\\\"state\\\":{\\\"waiting\\\":{\\\"message\\\":\\\"back-off 40s restarting failed container=ovnkube-controller 
pod=ovnkube-node-hh8lx_openshift-ovn-kubernetes(8fe08e0b-f339-4beb-953e-6e0180c6c945)\\\",\\\"reason\\\":\\\"CrashLoopBackOff\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/var/lib/kubelet\\\",\\\"name\\\":\\\"host-kubelet\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/etc/systemd/system\\\",\\\"name\\\":\\\"systemd-units\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/host\\\",\\\"name\\\":\\\"host-slash\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/ovn-kubernetes/\\\",\\\"name\\\":\\\"host-run-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/netns\\\",\\\"name\\\":\\\"host-run-netns\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/run/systemd/private\\\",\\\"name\\\":\\\"run-systemd\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"},{\\\"mountPath\\\":\\\"/cni-bin-dir\\\",\\\"name\\\":\\\"host-cni-bin\\\"},{\\\"mountPath\\\":\\\"/etc/cni/net.d\\\",\\\"name\\\":\\\"host-cni-netd\\\"},{\\\"mountPath\\\":\\\"/var/lib/cni/networks/ovn-k8s-cni-overlay\\\",\\\"name\\\":\\\"host-var-lib-cni-networks-ovn-kubernetes\\\"},{\\\"mountPath\\\":\\\"/run/openvswitch\\\",\\\"name\\\":\\\"run-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/log/ovnkube/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/etc/openvswitch\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/lib/openvswitch\\\",\\\"name\\\":\\\"var-lib-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovnkube-config/\\\",\\\"name\\\":\\\"ovnkube-config\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]},{\\\"containerID\\\":\\\"cri-o://2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"sbdb\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:33Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/ovnkube-lib\\\",\\\"name\\\":\\\"ovnkube-script-lib\\\"},{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/run/ovn/\\\",\\\"name\\\":\\\"run-ovn\\\"},{\\\"mountPath\\\":\\\"/var/log/ovn\\\",\\\"name\\\":\\\"node-log\\\"},{\\\"mountPath\\\":\\\"/env\\\",\\\"name\\\":\\\"env-overrides\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"initContainerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"image\\\":\\\"quay.io/openshift-release-
dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:174f36cdd47ef0d1d2099482919d773257453265a2af0b17b154edc32fa41ac2\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"kubecfg-setup\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":false,\\\"state\\\":{\\\"terminated\\\":{\\\"containerID\\\":\\\"cri-o://d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\\\",\\\"exitCode\\\":0,\\\"finishedAt\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"reason\\\":\\\"Completed\\\",\\\"startedAt\\\":\\\"2025-11-26T00:07:29Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/etc/ovn/\\\",\\\"name\\\":\\\"etc-openvswitch\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-mr9lb\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\"Disabled\\\"}]}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:28Z\\\"}}\" for pod \"openshift-ovn-kubernetes\"/\"ovnkube-node-hh8lx\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:47Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.882635 4875 status_manager.go:875] "Failed to update status for pod" pod="openshift-image-registry/node-ca-tvsgw" err="failed to patch status \"{\\\"metadata\\\":{\\\"uid\\\":\\\"be016325-bd90-4293-9d92-bc8cfba8a463\\\"},\\\"status\\\":{\\\"$setElementOrder/conditions\\\":[{\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"type\\\":\\\"Initialized\\\"},{\\\"type\\\":\\\"Ready\\\"},{\\\"type\\\":\\\"ContainersReady\\\"},{\\\"type\\\":\\\"PodScheduled\\\"}],\\\"conditions\\\":[{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"PodReadyToStartContainers\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:29Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Initialized\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"Ready\\\"},{\\\"lastProbeTime\\\":null,\\\"lastTransitionTime\\\":\\\"2025-11-26T00:07:31Z\\\",\\\"status\\\":\\\"True\\\",\\\"type\\\":\\\"ContainersReady\\\"}],\\\"containerStatuses\\\":[{\\\"containerID\\\":\\\"cri-o://6fecdc3efe4f0230352d90992e44266359ca7129f3654b43186f6e81bbe72ce2\\\",\\\"image\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"imageID\\\":\\\"quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:9fa29d188c85a8b1e1bd15c9c18e96f1b235da9bd4a45dbc086a4a69520ed63f\\\",\\\"lastState\\\":{},\\\"name\\\":\\\"node-ca\\\",\\\"ready\\\":true,\\\"restartCount\\\":0,\\\"started\\\":true,\\\"state\\\":{\\\"running\\\":{\\\"startedAt\\\":\\\"2025-11-26T00:07:30Z\\\"}},\\\"volumeMounts\\\":[{\\\"mountPath\\\":\\\"/tmp/serviceca\\\",\\\"name\\\":\\\"serviceca\\\"},{\\\"mountPath\\\":\\\"/etc/docker/certs.d\\\",\\\"name\\\":\\\"host\\\"},{\\\"mountPath\\\":\\\"/var/run/secrets/kubernetes.io/serviceaccount\\\",\\\"name\\\":\\\"kube-api-access-97pr5\\\",\\\"readOnly\\\":true,\\\"recursiveReadOnly\\\":\\\
"Disabled\\\"}]}],\\\"hostIP\\\":\\\"192.168.126.11\\\",\\\"hostIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"phase\\\":\\\"Running\\\",\\\"podIP\\\":\\\"192.168.126.11\\\",\\\"podIPs\\\":[{\\\"ip\\\":\\\"192.168.126.11\\\"}],\\\"startTime\\\":\\\"2025-11-26T00:07:29Z\\\"}}\" for pod \"openshift-image-registry\"/\"node-ca-tvsgw\": Internal error occurred: failed calling webhook \"pod.network-node-identity.openshift.io\": failed to call webhook: Post \"https://127.0.0.1:9743/pod?timeout=10s\": tls: failed to verify certificate: x509: certificate has expired or is not yet valid: current time 2025-11-26T00:08:47Z is after 2025-08-24T17:21:41Z" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.911975 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.912056 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.912071 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.912090 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:47 crc kubenswrapper[4875]: I1126 00:08:47.912102 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:47Z","lastTransitionTime":"2025-11-26T00:08:47Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.015110 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.015199 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.015218 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.015251 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.015273 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:48Z","lastTransitionTime":"2025-11-26T00:08:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.118729 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.118787 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.118804 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.118828 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.118841 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:48Z","lastTransitionTime":"2025-11-26T00:08:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.221367 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.221403 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.221432 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.221449 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.221464 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:48Z","lastTransitionTime":"2025-11-26T00:08:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.324958 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.325000 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.325009 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.325024 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.325035 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:48Z","lastTransitionTime":"2025-11-26T00:08:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.427986 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.428032 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.428040 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.428054 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.428064 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:48Z","lastTransitionTime":"2025-11-26T00:08:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.528929 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:08:48 crc kubenswrapper[4875]: E1126 00:08:48.529079 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.530193 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.530221 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.530233 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.530248 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.530264 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:48Z","lastTransitionTime":"2025-11-26T00:08:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.633544 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.633583 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.633592 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.633608 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.633617 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:48Z","lastTransitionTime":"2025-11-26T00:08:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.737624 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.737714 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.737740 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.737773 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.737795 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:48Z","lastTransitionTime":"2025-11-26T00:08:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.840574 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.840631 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.840643 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.840662 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.840680 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:48Z","lastTransitionTime":"2025-11-26T00:08:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.943445 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.943490 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.943500 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.943516 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:48 crc kubenswrapper[4875]: I1126 00:08:48.943525 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:48Z","lastTransitionTime":"2025-11-26T00:08:48Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.046971 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.047027 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.047040 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.047063 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.047078 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:49Z","lastTransitionTime":"2025-11-26T00:08:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.149801 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.149839 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.149848 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.149897 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.149908 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:49Z","lastTransitionTime":"2025-11-26T00:08:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.252553 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.252586 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.252594 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.252609 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.252619 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:49Z","lastTransitionTime":"2025-11-26T00:08:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.355802 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.355854 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.355867 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.355890 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.355906 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:49Z","lastTransitionTime":"2025-11-26T00:08:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.459243 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.459321 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.459331 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.459351 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.459364 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:49Z","lastTransitionTime":"2025-11-26T00:08:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.528859 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.529010 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.528859 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:08:49 crc kubenswrapper[4875]: E1126 00:08:49.529110 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:08:49 crc kubenswrapper[4875]: E1126 00:08:49.529391 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:08:49 crc kubenswrapper[4875]: E1126 00:08:49.529537 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.562671 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.562742 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.562762 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.562789 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.562808 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:49Z","lastTransitionTime":"2025-11-26T00:08:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.666582 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.666708 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.666736 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.666776 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.666803 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:49Z","lastTransitionTime":"2025-11-26T00:08:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.770556 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.770636 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.770659 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.770696 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.770722 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:49Z","lastTransitionTime":"2025-11-26T00:08:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.874332 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.874390 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.874405 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.874478 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.874500 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:49Z","lastTransitionTime":"2025-11-26T00:08:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.977989 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.978064 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.978093 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.978128 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:49 crc kubenswrapper[4875]: I1126 00:08:49.978162 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:49Z","lastTransitionTime":"2025-11-26T00:08:49Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.080503 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.080577 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.080600 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.080634 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.080657 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:50Z","lastTransitionTime":"2025-11-26T00:08:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.183726 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.183822 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.183861 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.183898 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.183925 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:50Z","lastTransitionTime":"2025-11-26T00:08:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.288271 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.288364 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.288387 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.288458 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.288481 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:50Z","lastTransitionTime":"2025-11-26T00:08:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.391269 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.391312 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.391321 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.391336 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.391346 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:50Z","lastTransitionTime":"2025-11-26T00:08:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.494606 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.494655 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.494682 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.494701 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.494714 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:50Z","lastTransitionTime":"2025-11-26T00:08:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.528897 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:08:50 crc kubenswrapper[4875]: E1126 00:08:50.529171 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.598111 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.598193 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.598219 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.598300 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.598330 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:50Z","lastTransitionTime":"2025-11-26T00:08:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.702524 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.702598 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.702616 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.702643 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.702662 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:50Z","lastTransitionTime":"2025-11-26T00:08:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.807026 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.807120 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.807145 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.807178 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.807201 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:50Z","lastTransitionTime":"2025-11-26T00:08:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.915148 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.915213 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.915231 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.915257 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:50 crc kubenswrapper[4875]: I1126 00:08:50.915277 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:50Z","lastTransitionTime":"2025-11-26T00:08:50Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.018255 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.018325 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.018344 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.018375 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.018453 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:51Z","lastTransitionTime":"2025-11-26T00:08:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.121670 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.121794 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.121823 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.121860 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.121882 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:51Z","lastTransitionTime":"2025-11-26T00:08:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.225593 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.225651 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.225663 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.225684 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.225697 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:51Z","lastTransitionTime":"2025-11-26T00:08:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.328448 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.328518 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.328553 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.328594 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.328621 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:51Z","lastTransitionTime":"2025-11-26T00:08:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.432098 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.432158 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.432178 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.432206 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.432228 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:51Z","lastTransitionTime":"2025-11-26T00:08:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.528759 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.529102 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.529114 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:08:51 crc kubenswrapper[4875]: E1126 00:08:51.529341 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:08:51 crc kubenswrapper[4875]: E1126 00:08:51.529498 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:08:51 crc kubenswrapper[4875]: E1126 00:08:51.529698 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.535198 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.535242 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.535252 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.535269 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.535281 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:51Z","lastTransitionTime":"2025-11-26T00:08:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.637383 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.637503 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.637518 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.637571 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.637586 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:51Z","lastTransitionTime":"2025-11-26T00:08:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.741463 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.741530 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.741548 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.741576 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.741597 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:51Z","lastTransitionTime":"2025-11-26T00:08:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.845834 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.845910 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.845925 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.845948 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.845962 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:51Z","lastTransitionTime":"2025-11-26T00:08:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.948760 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.948812 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.948825 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.948844 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:51 crc kubenswrapper[4875]: I1126 00:08:51.948858 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:51Z","lastTransitionTime":"2025-11-26T00:08:51Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.051916 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.051994 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.052019 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.052054 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.052079 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:52Z","lastTransitionTime":"2025-11-26T00:08:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.155940 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.156016 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.156034 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.156061 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.156082 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:52Z","lastTransitionTime":"2025-11-26T00:08:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.258638 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.258685 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.258696 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.258712 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.258723 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:52Z","lastTransitionTime":"2025-11-26T00:08:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.360918 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.360953 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.360964 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.360979 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.360989 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:52Z","lastTransitionTime":"2025-11-26T00:08:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.463725 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.463753 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.463763 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.463774 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.463783 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:52Z","lastTransitionTime":"2025-11-26T00:08:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.528510 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:08:52 crc kubenswrapper[4875]: E1126 00:08:52.529132 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.565973 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.566018 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.566029 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.566045 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.566056 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:52Z","lastTransitionTime":"2025-11-26T00:08:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.668446 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.668476 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.668486 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.668504 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.668513 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:52Z","lastTransitionTime":"2025-11-26T00:08:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.770680 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.770714 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.770723 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.770736 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.770744 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:52Z","lastTransitionTime":"2025-11-26T00:08:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.874180 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.874237 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.874250 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.874270 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.874284 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:52Z","lastTransitionTime":"2025-11-26T00:08:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.977503 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.977570 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.977589 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.977613 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:52 crc kubenswrapper[4875]: I1126 00:08:52.977626 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:52Z","lastTransitionTime":"2025-11-26T00:08:52Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.080238 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.080276 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.080285 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.080298 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.080307 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:53Z","lastTransitionTime":"2025-11-26T00:08:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.183888 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.183970 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.183993 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.184023 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.184048 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:53Z","lastTransitionTime":"2025-11-26T00:08:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.286904 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.286964 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.286977 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.286994 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.287006 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:53Z","lastTransitionTime":"2025-11-26T00:08:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.390119 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.390185 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.390207 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.390237 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.390261 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:53Z","lastTransitionTime":"2025-11-26T00:08:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.493041 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.493105 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.493124 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.493141 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.493151 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:53Z","lastTransitionTime":"2025-11-26T00:08:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.528011 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.528066 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.528089 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:08:53 crc kubenswrapper[4875]: E1126 00:08:53.528193 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:08:53 crc kubenswrapper[4875]: E1126 00:08:53.528305 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:08:53 crc kubenswrapper[4875]: E1126 00:08:53.528506 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.595277 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.595371 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.595396 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.595490 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.595518 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:53Z","lastTransitionTime":"2025-11-26T00:08:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.698723 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.698756 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.698765 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.698779 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.698788 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:53Z","lastTransitionTime":"2025-11-26T00:08:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.801658 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.801713 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.801729 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.801745 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.801756 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:53Z","lastTransitionTime":"2025-11-26T00:08:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.905963 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.906030 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.906052 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.906079 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:53 crc kubenswrapper[4875]: I1126 00:08:53.906099 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:53Z","lastTransitionTime":"2025-11-26T00:08:53Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.008916 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.008981 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.008993 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.009009 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.009025 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:54Z","lastTransitionTime":"2025-11-26T00:08:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.111846 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.111890 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.111899 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.111916 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.111925 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:54Z","lastTransitionTime":"2025-11-26T00:08:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.215351 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.215460 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.215486 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.215514 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.215538 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:54Z","lastTransitionTime":"2025-11-26T00:08:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.318496 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.318608 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.318628 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.318654 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.318671 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:54Z","lastTransitionTime":"2025-11-26T00:08:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.421271 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.421348 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.421378 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.421448 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.421473 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:54Z","lastTransitionTime":"2025-11-26T00:08:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.525059 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.525127 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.525145 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.525173 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.525193 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:54Z","lastTransitionTime":"2025-11-26T00:08:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.528397 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:08:54 crc kubenswrapper[4875]: E1126 00:08:54.528608 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.529900 4875 scope.go:117] "RemoveContainer" containerID="2e1e37ff34e8016d35de4328475cea8cef96f21dc241d6ab0dfa8c068c38059d" Nov 26 00:08:54 crc kubenswrapper[4875]: E1126 00:08:54.530234 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"ovnkube-controller\" with CrashLoopBackOff: \"back-off 40s restarting failed container=ovnkube-controller pod=ovnkube-node-hh8lx_openshift-ovn-kubernetes(8fe08e0b-f339-4beb-953e-6e0180c6c945)\"" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.627488 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.627551 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.627568 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.627585 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.627599 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:54Z","lastTransitionTime":"2025-11-26T00:08:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.730638 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.730689 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.730701 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.730721 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.730732 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:54Z","lastTransitionTime":"2025-11-26T00:08:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.834093 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.834137 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.834145 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.834158 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.834166 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:54Z","lastTransitionTime":"2025-11-26T00:08:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.936776 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.936836 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.936845 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.936863 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:54 crc kubenswrapper[4875]: I1126 00:08:54.936875 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:54Z","lastTransitionTime":"2025-11-26T00:08:54Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.040237 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.040296 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.040310 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.040328 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.040339 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:55Z","lastTransitionTime":"2025-11-26T00:08:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.142835 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.142880 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.142890 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.142907 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.142917 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:55Z","lastTransitionTime":"2025-11-26T00:08:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.245491 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.245547 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.245573 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.245595 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.245622 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:55Z","lastTransitionTime":"2025-11-26T00:08:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.348197 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.348249 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.348262 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.348281 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.348325 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:55Z","lastTransitionTime":"2025-11-26T00:08:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.450980 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.451032 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.451044 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.451060 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.451072 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:55Z","lastTransitionTime":"2025-11-26T00:08:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.527972 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.528016 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:08:55 crc kubenswrapper[4875]: E1126 00:08:55.528107 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.527973 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:08:55 crc kubenswrapper[4875]: E1126 00:08:55.528282 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:08:55 crc kubenswrapper[4875]: E1126 00:08:55.528324 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.553610 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.553683 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.553700 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.553723 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.553744 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:55Z","lastTransitionTime":"2025-11-26T00:08:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.656495 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.656539 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.656550 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.656565 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.656574 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:55Z","lastTransitionTime":"2025-11-26T00:08:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.758839 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.758879 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.758888 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.758901 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.758910 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:55Z","lastTransitionTime":"2025-11-26T00:08:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.862146 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.862207 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.862220 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.862240 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.862258 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:55Z","lastTransitionTime":"2025-11-26T00:08:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.964529 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.964561 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.964570 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.964585 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:55 crc kubenswrapper[4875]: I1126 00:08:55.964595 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:55Z","lastTransitionTime":"2025-11-26T00:08:55Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.066819 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.066844 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.066853 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.066866 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.066875 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:56Z","lastTransitionTime":"2025-11-26T00:08:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.169515 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.169561 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.169573 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.169587 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.169596 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:56Z","lastTransitionTime":"2025-11-26T00:08:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?"} Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.221468 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientMemory" Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.221540 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasNoDiskPressure" Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.221559 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeHasSufficientPID" Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.221589 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeNotReady" Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.221611 4875 setters.go:603] "Node became not ready" node="crc" condition={"type":"Ready","status":"False","lastHeartbeatTime":"2025-11-26T00:08:56Z","lastTransitionTime":"2025-11-26T00:08:56Z","reason":"KubeletNotReady","message":"container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. 
Has your network provider started?"} Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.275472 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-version/cluster-version-operator-5c965bbfc6-72xx4"] Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.276844 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-72xx4" Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.279616 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.279963 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.280362 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.281217 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.311052 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-additional-cni-plugins-qvrsq" podStartSLOduration=89.31103091599999 podStartE2EDuration="1m29.311030916s" podCreationTimestamp="2025-11-26 00:07:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:08:56.308836789 +0000 UTC m=+109.498812494" watchObservedRunningTime="2025-11-26 00:08:56.311030916 +0000 UTC m=+109.501006611" Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.311468 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podStartSLOduration=89.311458596 podStartE2EDuration="1m29.311458596s" podCreationTimestamp="2025-11-26 00:07:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:08:56.294400368 +0000 UTC m=+109.484376063" watchObservedRunningTime="2025-11-26 00:08:56.311458596 +0000 UTC m=+109.501434301" Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.322668 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-control-plane-749d76644c-qhmvp" podStartSLOduration=88.322648324 podStartE2EDuration="1m28.322648324s" podCreationTimestamp="2025-11-26 00:07:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:08:56.321628848 +0000 UTC m=+109.511604543" watchObservedRunningTime="2025-11-26 00:08:56.322648324 +0000 UTC m=+109.512624019" Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.339770 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=89.339751624 podStartE2EDuration="1m29.339751624s" podCreationTimestamp="2025-11-26 00:07:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:08:56.339051807 +0000 UTC m=+109.529027522" watchObservedRunningTime="2025-11-26 00:08:56.339751624 +0000 UTC m=+109.529727329" Nov 
26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.350226 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler/openshift-kube-scheduler-crc" podStartSLOduration=53.350218214 podStartE2EDuration="53.350218214s" podCreationTimestamp="2025-11-26 00:08:03 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:08:56.350040538 +0000 UTC m=+109.540016253" watchObservedRunningTime="2025-11-26 00:08:56.350218214 +0000 UTC m=+109.540193909" Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.367281 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0c006551-814c-4731-901f-6a6fa90e17bb-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-72xx4\" (UID: \"0c006551-814c-4731-901f-6a6fa90e17bb\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-72xx4" Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.367336 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/0c006551-814c-4731-901f-6a6fa90e17bb-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-72xx4\" (UID: \"0c006551-814c-4731-901f-6a6fa90e17bb\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-72xx4" Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.367396 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0c006551-814c-4731-901f-6a6fa90e17bb-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-72xx4\" (UID: \"0c006551-814c-4731-901f-6a6fa90e17bb\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-72xx4" Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.367436 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/0c006551-814c-4731-901f-6a6fa90e17bb-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-72xx4\" (UID: \"0c006551-814c-4731-901f-6a6fa90e17bb\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-72xx4" Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.367469 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0c006551-814c-4731-901f-6a6fa90e17bb-service-ca\") pod \"cluster-version-operator-5c965bbfc6-72xx4\" (UID: \"0c006551-814c-4731-901f-6a6fa90e17bb\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-72xx4" Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.411407 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/node-ca-tvsgw" podStartSLOduration=89.411383757 podStartE2EDuration="1m29.411383757s" podCreationTimestamp="2025-11-26 00:07:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:08:56.399940333 +0000 UTC m=+109.589916038" watchObservedRunningTime="2025-11-26 00:08:56.411383757 +0000 UTC m=+109.601359462" Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.424697 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" podStartSLOduration=88.424679239 podStartE2EDuration="1m28.424679239s" podCreationTimestamp="2025-11-26 00:07:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:08:56.424286259 +0000 UTC m=+109.614261964" watchObservedRunningTime="2025-11-26 00:08:56.424679239 +0000 UTC m=+109.614654944" Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.435210 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/kube-rbac-proxy-crio-crc" podStartSLOduration=19.43519387 podStartE2EDuration="19.43519387s" podCreationTimestamp="2025-11-26 00:08:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:08:56.434616555 +0000 UTC m=+109.624592270" watchObservedRunningTime="2025-11-26 00:08:56.43519387 +0000 UTC m=+109.625169585" Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.468371 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0c006551-814c-4731-901f-6a6fa90e17bb-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-72xx4\" (UID: \"0c006551-814c-4731-901f-6a6fa90e17bb\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-72xx4" Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.468464 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/0c006551-814c-4731-901f-6a6fa90e17bb-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-72xx4\" (UID: \"0c006551-814c-4731-901f-6a6fa90e17bb\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-72xx4" Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.468492 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0c006551-814c-4731-901f-6a6fa90e17bb-service-ca\") pod \"cluster-version-operator-5c965bbfc6-72xx4\" (UID: \"0c006551-814c-4731-901f-6a6fa90e17bb\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-72xx4" Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.468557 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-ssl-certs\" (UniqueName: \"kubernetes.io/host-path/0c006551-814c-4731-901f-6a6fa90e17bb-etc-ssl-certs\") pod \"cluster-version-operator-5c965bbfc6-72xx4\" (UID: \"0c006551-814c-4731-901f-6a6fa90e17bb\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-72xx4" Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.468673 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0c006551-814c-4731-901f-6a6fa90e17bb-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-72xx4\" (UID: \"0c006551-814c-4731-901f-6a6fa90e17bb\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-72xx4" Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.468737 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/0c006551-814c-4731-901f-6a6fa90e17bb-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-72xx4\" (UID: \"0c006551-814c-4731-901f-6a6fa90e17bb\") " 
pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-72xx4" Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.468803 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-cvo-updatepayloads\" (UniqueName: \"kubernetes.io/host-path/0c006551-814c-4731-901f-6a6fa90e17bb-etc-cvo-updatepayloads\") pod \"cluster-version-operator-5c965bbfc6-72xx4\" (UID: \"0c006551-814c-4731-901f-6a6fa90e17bb\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-72xx4" Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.469523 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/0c006551-814c-4731-901f-6a6fa90e17bb-service-ca\") pod \"cluster-version-operator-5c965bbfc6-72xx4\" (UID: \"0c006551-814c-4731-901f-6a6fa90e17bb\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-72xx4" Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.474446 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/0c006551-814c-4731-901f-6a6fa90e17bb-serving-cert\") pod \"cluster-version-operator-5c965bbfc6-72xx4\" (UID: \"0c006551-814c-4731-901f-6a6fa90e17bb\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-72xx4" Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.492049 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/0c006551-814c-4731-901f-6a6fa90e17bb-kube-api-access\") pod \"cluster-version-operator-5c965bbfc6-72xx4\" (UID: \"0c006551-814c-4731-901f-6a6fa90e17bb\") " pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-72xx4" Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.507339 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd/etcd-crc" podStartSLOduration=87.507323786 podStartE2EDuration="1m27.507323786s" podCreationTimestamp="2025-11-26 00:07:29 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:08:56.507031869 +0000 UTC m=+109.697007564" watchObservedRunningTime="2025-11-26 00:08:56.507323786 +0000 UTC m=+109.697299481" Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.528236 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:08:56 crc kubenswrapper[4875]: E1126 00:08:56.528367 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.550838 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/node-resolver-hjshz" podStartSLOduration=90.550821465 podStartE2EDuration="1m30.550821465s" podCreationTimestamp="2025-11-26 00:07:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:08:56.550512177 +0000 UTC m=+109.740487892" watchObservedRunningTime="2025-11-26 00:08:56.550821465 +0000 UTC m=+109.740797150" Nov 26 00:08:56 crc kubenswrapper[4875]: I1126 00:08:56.591711 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-72xx4" Nov 26 00:08:57 crc kubenswrapper[4875]: I1126 00:08:57.059244 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-72xx4" event={"ID":"0c006551-814c-4731-901f-6a6fa90e17bb","Type":"ContainerStarted","Data":"e6909b670454fec1306fc0bd0d0feff0302ed281e916ff1b6d39675df89d0617"} Nov 26 00:08:57 crc kubenswrapper[4875]: I1126 00:08:57.059299 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-72xx4" event={"ID":"0c006551-814c-4731-901f-6a6fa90e17bb","Type":"ContainerStarted","Data":"b9d00786b208c460109c6406468459e64ebef5e3d7d1722ec1b0d3fdc65ed386"} Nov 26 00:08:57 crc kubenswrapper[4875]: I1126 00:08:57.077951 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-version/cluster-version-operator-5c965bbfc6-72xx4" podStartSLOduration=90.077926967 podStartE2EDuration="1m30.077926967s" podCreationTimestamp="2025-11-26 00:07:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:08:57.077444464 +0000 UTC m=+110.267420159" watchObservedRunningTime="2025-11-26 00:08:57.077926967 +0000 UTC m=+110.267902662" Nov 26 00:08:57 crc kubenswrapper[4875]: I1126 00:08:57.078625 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-8wvqp" podStartSLOduration=90.078618445 podStartE2EDuration="1m30.078618445s" podCreationTimestamp="2025-11-26 00:07:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:08:56.562968737 +0000 UTC m=+109.752944442" watchObservedRunningTime="2025-11-26 00:08:57.078618445 +0000 UTC m=+110.268594140" Nov 26 00:08:57 crc kubenswrapper[4875]: I1126 00:08:57.528739 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:08:57 crc kubenswrapper[4875]: I1126 00:08:57.528773 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:08:57 crc kubenswrapper[4875]: I1126 00:08:57.528739 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:08:57 crc kubenswrapper[4875]: E1126 00:08:57.528887 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:08:57 crc kubenswrapper[4875]: E1126 00:08:57.529954 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:08:57 crc kubenswrapper[4875]: E1126 00:08:57.530125 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:08:58 crc kubenswrapper[4875]: I1126 00:08:58.528298 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:08:58 crc kubenswrapper[4875]: E1126 00:08:58.528441 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:08:59 crc kubenswrapper[4875]: I1126 00:08:59.528615 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:08:59 crc kubenswrapper[4875]: E1126 00:08:59.529089 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:08:59 crc kubenswrapper[4875]: I1126 00:08:59.528619 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:08:59 crc kubenswrapper[4875]: I1126 00:08:59.528671 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:08:59 crc kubenswrapper[4875]: E1126 00:08:59.529459 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:08:59 crc kubenswrapper[4875]: E1126 00:08:59.529708 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:09:00 crc kubenswrapper[4875]: I1126 00:09:00.528145 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:09:00 crc kubenswrapper[4875]: E1126 00:09:00.528290 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:09:01 crc kubenswrapper[4875]: I1126 00:09:01.528832 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:09:01 crc kubenswrapper[4875]: I1126 00:09:01.528874 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:09:01 crc kubenswrapper[4875]: E1126 00:09:01.529050 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:09:01 crc kubenswrapper[4875]: I1126 00:09:01.529893 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:09:01 crc kubenswrapper[4875]: E1126 00:09:01.530088 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:09:01 crc kubenswrapper[4875]: E1126 00:09:01.530593 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:09:02 crc kubenswrapper[4875]: I1126 00:09:02.076281 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8wvqp_9fd65949-6a8a-4828-baa4-1c88c71d5ed2/kube-multus/1.log" Nov 26 00:09:02 crc kubenswrapper[4875]: I1126 00:09:02.076708 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8wvqp_9fd65949-6a8a-4828-baa4-1c88c71d5ed2/kube-multus/0.log" Nov 26 00:09:02 crc kubenswrapper[4875]: I1126 00:09:02.076787 4875 generic.go:334] "Generic (PLEG): container finished" podID="9fd65949-6a8a-4828-baa4-1c88c71d5ed2" containerID="8403934b57b62597733ce8e578eef53784b1e5a39eba7e4fb8765870a3690424" exitCode=1 Nov 26 00:09:02 crc kubenswrapper[4875]: I1126 00:09:02.076823 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-8wvqp" event={"ID":"9fd65949-6a8a-4828-baa4-1c88c71d5ed2","Type":"ContainerDied","Data":"8403934b57b62597733ce8e578eef53784b1e5a39eba7e4fb8765870a3690424"} Nov 26 00:09:02 crc kubenswrapper[4875]: I1126 00:09:02.076908 4875 scope.go:117] "RemoveContainer" containerID="e107494e5b30ef73730bc20a4b4ae86c83caf1590f6a8044d1892e424e1559a8" Nov 26 00:09:02 crc kubenswrapper[4875]: I1126 00:09:02.077336 4875 scope.go:117] "RemoveContainer" containerID="8403934b57b62597733ce8e578eef53784b1e5a39eba7e4fb8765870a3690424" Nov 26 00:09:02 crc kubenswrapper[4875]: E1126 00:09:02.077478 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 10s restarting failed container=kube-multus pod=multus-8wvqp_openshift-multus(9fd65949-6a8a-4828-baa4-1c88c71d5ed2)\"" pod="openshift-multus/multus-8wvqp" podUID="9fd65949-6a8a-4828-baa4-1c88c71d5ed2" Nov 26 00:09:02 crc kubenswrapper[4875]: I1126 00:09:02.528453 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:09:02 crc kubenswrapper[4875]: E1126 00:09:02.528815 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:09:03 crc kubenswrapper[4875]: I1126 00:09:03.080874 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8wvqp_9fd65949-6a8a-4828-baa4-1c88c71d5ed2/kube-multus/1.log" Nov 26 00:09:03 crc kubenswrapper[4875]: I1126 00:09:03.528796 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:09:03 crc kubenswrapper[4875]: I1126 00:09:03.528822 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:09:03 crc kubenswrapper[4875]: E1126 00:09:03.528923 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:09:03 crc kubenswrapper[4875]: I1126 00:09:03.528963 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:09:03 crc kubenswrapper[4875]: E1126 00:09:03.528988 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:09:03 crc kubenswrapper[4875]: E1126 00:09:03.529041 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:09:04 crc kubenswrapper[4875]: I1126 00:09:04.528474 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:09:04 crc kubenswrapper[4875]: E1126 00:09:04.528618 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:09:05 crc kubenswrapper[4875]: I1126 00:09:05.528181 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:09:05 crc kubenswrapper[4875]: E1126 00:09:05.528389 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:09:05 crc kubenswrapper[4875]: I1126 00:09:05.529454 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:09:05 crc kubenswrapper[4875]: I1126 00:09:05.529489 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:09:05 crc kubenswrapper[4875]: E1126 00:09:05.530031 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:09:05 crc kubenswrapper[4875]: E1126 00:09:05.530219 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:09:06 crc kubenswrapper[4875]: I1126 00:09:06.528054 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:09:06 crc kubenswrapper[4875]: E1126 00:09:06.528498 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:09:06 crc kubenswrapper[4875]: I1126 00:09:06.528672 4875 scope.go:117] "RemoveContainer" containerID="2e1e37ff34e8016d35de4328475cea8cef96f21dc241d6ab0dfa8c068c38059d" Nov 26 00:09:07 crc kubenswrapper[4875]: I1126 00:09:07.094107 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hh8lx_8fe08e0b-f339-4beb-953e-6e0180c6c945/ovnkube-controller/3.log" Nov 26 00:09:07 crc kubenswrapper[4875]: I1126 00:09:07.096124 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" event={"ID":"8fe08e0b-f339-4beb-953e-6e0180c6c945","Type":"ContainerStarted","Data":"d43a2eaad6ec6c57703a6f096ce5a496f8cc45abb1d78a009745a72b9b9bf492"} Nov 26 00:09:07 crc kubenswrapper[4875]: I1126 00:09:07.097012 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:09:07 crc kubenswrapper[4875]: I1126 00:09:07.122098 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" podStartSLOduration=99.122083548 podStartE2EDuration="1m39.122083548s" podCreationTimestamp="2025-11-26 00:07:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:09:07.121844932 +0000 UTC m=+120.311820627" watchObservedRunningTime="2025-11-26 00:09:07.122083548 +0000 UTC m=+120.312059233" Nov 26 00:09:07 crc kubenswrapper[4875]: I1126 00:09:07.260533 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-jhkp7"] Nov 26 00:09:07 crc kubenswrapper[4875]: I1126 00:09:07.260657 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:09:07 crc kubenswrapper[4875]: E1126 00:09:07.260751 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:09:07 crc kubenswrapper[4875]: E1126 00:09:07.473732 4875 kubelet_node_status.go:497] "Node not becoming ready in time after startup" Nov 26 00:09:07 crc kubenswrapper[4875]: I1126 00:09:07.527983 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:09:07 crc kubenswrapper[4875]: I1126 00:09:07.527994 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:09:07 crc kubenswrapper[4875]: I1126 00:09:07.528055 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:09:07 crc kubenswrapper[4875]: E1126 00:09:07.528987 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:09:07 crc kubenswrapper[4875]: E1126 00:09:07.529081 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:09:07 crc kubenswrapper[4875]: E1126 00:09:07.529116 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:09:07 crc kubenswrapper[4875]: E1126 00:09:07.677436 4875 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 26 00:09:09 crc kubenswrapper[4875]: I1126 00:09:09.528605 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:09:09 crc kubenswrapper[4875]: I1126 00:09:09.528718 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:09:09 crc kubenswrapper[4875]: I1126 00:09:09.528804 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:09:09 crc kubenswrapper[4875]: I1126 00:09:09.529019 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:09:09 crc kubenswrapper[4875]: E1126 00:09:09.529027 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:09:09 crc kubenswrapper[4875]: E1126 00:09:09.529161 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:09:09 crc kubenswrapper[4875]: E1126 00:09:09.529230 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:09:09 crc kubenswrapper[4875]: E1126 00:09:09.529275 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:09:11 crc kubenswrapper[4875]: I1126 00:09:11.529145 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:09:11 crc kubenswrapper[4875]: I1126 00:09:11.529201 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:09:11 crc kubenswrapper[4875]: I1126 00:09:11.529254 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:09:11 crc kubenswrapper[4875]: I1126 00:09:11.529184 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:09:11 crc kubenswrapper[4875]: E1126 00:09:11.529548 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:09:11 crc kubenswrapper[4875]: E1126 00:09:11.529882 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:09:11 crc kubenswrapper[4875]: E1126 00:09:11.530057 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:09:11 crc kubenswrapper[4875]: E1126 00:09:11.530151 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:09:12 crc kubenswrapper[4875]: E1126 00:09:12.678898 4875 kubelet.go:2916] "Container runtime network not ready" networkReady="NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" Nov 26 00:09:13 crc kubenswrapper[4875]: I1126 00:09:13.528317 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:09:13 crc kubenswrapper[4875]: E1126 00:09:13.528477 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:09:13 crc kubenswrapper[4875]: I1126 00:09:13.528528 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:09:13 crc kubenswrapper[4875]: I1126 00:09:13.528529 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:09:13 crc kubenswrapper[4875]: E1126 00:09:13.528835 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:09:13 crc kubenswrapper[4875]: E1126 00:09:13.528905 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:09:13 crc kubenswrapper[4875]: I1126 00:09:13.528971 4875 scope.go:117] "RemoveContainer" containerID="8403934b57b62597733ce8e578eef53784b1e5a39eba7e4fb8765870a3690424" Nov 26 00:09:13 crc kubenswrapper[4875]: I1126 00:09:13.529557 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:09:13 crc kubenswrapper[4875]: E1126 00:09:13.529728 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:09:14 crc kubenswrapper[4875]: I1126 00:09:14.117864 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8wvqp_9fd65949-6a8a-4828-baa4-1c88c71d5ed2/kube-multus/1.log" Nov 26 00:09:14 crc kubenswrapper[4875]: I1126 00:09:14.118179 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-8wvqp" event={"ID":"9fd65949-6a8a-4828-baa4-1c88c71d5ed2","Type":"ContainerStarted","Data":"72b992d4d593659316136ff4983256fa4cb0d025d16018153f4829f1003548ac"} Nov 26 00:09:15 crc kubenswrapper[4875]: I1126 00:09:15.528397 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:09:15 crc kubenswrapper[4875]: I1126 00:09:15.528458 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:09:15 crc kubenswrapper[4875]: I1126 00:09:15.528482 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:09:15 crc kubenswrapper[4875]: E1126 00:09:15.528605 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:09:15 crc kubenswrapper[4875]: I1126 00:09:15.528663 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:09:15 crc kubenswrapper[4875]: E1126 00:09:15.528822 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:09:15 crc kubenswrapper[4875]: E1126 00:09:15.529497 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:09:15 crc kubenswrapper[4875]: E1126 00:09:15.529131 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:09:17 crc kubenswrapper[4875]: I1126 00:09:17.528018 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:09:17 crc kubenswrapper[4875]: I1126 00:09:17.528032 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:09:17 crc kubenswrapper[4875]: I1126 00:09:17.528111 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:09:17 crc kubenswrapper[4875]: I1126 00:09:17.529396 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:09:17 crc kubenswrapper[4875]: E1126 00:09:17.529396 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-multus/network-metrics-daemon-jhkp7" podUID="3b640f09-f96d-4197-97be-f50e211e484d" Nov 26 00:09:17 crc kubenswrapper[4875]: E1126 00:09:17.529657 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" 
pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" podUID="9d751cbb-f2e2-430d-9754-c882a5e924a5" Nov 26 00:09:17 crc kubenswrapper[4875]: E1126 00:09:17.529699 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-diagnostics/network-check-target-xd92c" podUID="3b6479f0-333b-4a96-9adf-2099afdc2447" Nov 26 00:09:17 crc kubenswrapper[4875]: E1126 00:09:17.529819 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="network is not ready: container runtime network not ready: NetworkReady=false reason:NetworkPluginNotReady message:Network plugin returns error: no CNI configuration file in /etc/kubernetes/cni/net.d/. Has your network provider started?" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" podUID="5fe485a1-e14f-4c09-b5b9-f252bc42b7e8" Nov 26 00:09:19 crc kubenswrapper[4875]: I1126 00:09:19.528794 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:09:19 crc kubenswrapper[4875]: I1126 00:09:19.528840 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:09:19 crc kubenswrapper[4875]: I1126 00:09:19.528794 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:09:19 crc kubenswrapper[4875]: I1126 00:09:19.528923 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:09:19 crc kubenswrapper[4875]: I1126 00:09:19.531505 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 26 00:09:19 crc kubenswrapper[4875]: I1126 00:09:19.531517 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 26 00:09:19 crc kubenswrapper[4875]: I1126 00:09:19.531750 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 26 00:09:19 crc kubenswrapper[4875]: I1126 00:09:19.531832 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 26 00:09:19 crc kubenswrapper[4875]: I1126 00:09:19.531964 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 26 00:09:19 crc kubenswrapper[4875]: I1126 00:09:19.532763 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.824843 4875 kubelet_node_status.go:724] "Recording event message for node" node="crc" event="NodeReady" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.864289 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-8fcp4"] Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.864805 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-pkmj5"] Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.865044 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-rlkpg"] Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.865296 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rlkpg" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.865665 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-8fcp4" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.866096 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-pkmj5" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.866897 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-pruner-29401920-nz8v2"] Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.867275 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-pruner-29401920-nz8v2" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.867333 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-5mx4v"] Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.867832 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-5mx4v" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.868590 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.881557 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.881976 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.882056 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.882394 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-l4vms"] Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.884824 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.884949 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.885036 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.885043 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.885189 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"serviceca" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.885290 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.885477 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.885498 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-root-ca.crt" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.885606 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.885642 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.885946 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.886360 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.891330 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.891458 4875 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-apiserver"/"image-import-ca" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.891608 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.891715 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.891804 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.891834 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.891906 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.891990 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"pruner-dockercfg-p7bcw" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.892089 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.892179 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.892196 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.892311 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.894785 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-machine-approver/machine-approver-56656f9798-7dk2l"] Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.895184 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-mgkmk"] Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.895325 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7dk2l" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.895444 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-l4vms" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.895606 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.896232 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.896305 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-zww4t"] Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.896791 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-55rwl"] Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.896841 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-zww4t" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.897686 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-55rwl" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.897734 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-mgkmk" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.898293 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-v9znd"] Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.898735 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.898900 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.899458 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.902458 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console-operator/console-operator-58897d9998-w9htq"] Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.902970 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-dgkwj"] Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.903220 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/55a68700-c0f7-4ae5-a252-013c8fbab545-serviceca\") pod \"image-pruner-29401920-nz8v2\" (UID: \"55a68700-c0f7-4ae5-a252-013c8fbab545\") " pod="openshift-image-registry/image-pruner-29401920-nz8v2" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.903267 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-md7bb\" (UniqueName: \"kubernetes.io/projected/d2c99872-a302-4599-96b7-2320051f069c-kube-api-access-md7bb\") pod \"machine-api-operator-5694c8668f-pkmj5\" (UID: \"d2c99872-a302-4599-96b7-2320051f069c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-pkmj5" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.903291 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f5k8q\" (UniqueName: \"kubernetes.io/projected/a1910aeb-3a24-4922-ada0-a47d871c0998-kube-api-access-f5k8q\") pod \"apiserver-76f77b778f-8fcp4\" (UID: \"a1910aeb-3a24-4922-ada0-a47d871c0998\") " pod="openshift-apiserver/apiserver-76f77b778f-8fcp4" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.903345 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1910aeb-3a24-4922-ada0-a47d871c0998-config\") pod \"apiserver-76f77b778f-8fcp4\" (UID: \"a1910aeb-3a24-4922-ada0-a47d871c0998\") " pod="openshift-apiserver/apiserver-76f77b778f-8fcp4" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.903367 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: 
\"kubernetes.io/host-path/a1910aeb-3a24-4922-ada0-a47d871c0998-node-pullsecrets\") pod \"apiserver-76f77b778f-8fcp4\" (UID: \"a1910aeb-3a24-4922-ada0-a47d871c0998\") " pod="openshift-apiserver/apiserver-76f77b778f-8fcp4" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.903389 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/a1910aeb-3a24-4922-ada0-a47d871c0998-etcd-client\") pod \"apiserver-76f77b778f-8fcp4\" (UID: \"a1910aeb-3a24-4922-ada0-a47d871c0998\") " pod="openshift-apiserver/apiserver-76f77b778f-8fcp4" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.903408 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2c99872-a302-4599-96b7-2320051f069c-config\") pod \"machine-api-operator-5694c8668f-pkmj5\" (UID: \"d2c99872-a302-4599-96b7-2320051f069c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-pkmj5" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.903448 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/42c80491-a1f7-4f1f-b2a0-db682fbc490e-config\") pod \"route-controller-manager-6576b87f9c-rlkpg\" (UID: \"42c80491-a1f7-4f1f-b2a0-db682fbc490e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rlkpg" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.903473 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/3a569b99-f211-4204-a526-314e066143c2-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-5mx4v\" (UID: \"3a569b99-f211-4204-a526-314e066143c2\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5mx4v" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.903505 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-dgkwj" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.903496 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/a1910aeb-3a24-4922-ada0-a47d871c0998-encryption-config\") pod \"apiserver-76f77b778f-8fcp4\" (UID: \"a1910aeb-3a24-4922-ada0-a47d871c0998\") " pod="openshift-apiserver/apiserver-76f77b778f-8fcp4" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.903582 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a1910aeb-3a24-4922-ada0-a47d871c0998-audit-dir\") pod \"apiserver-76f77b778f-8fcp4\" (UID: \"a1910aeb-3a24-4922-ada0-a47d871c0998\") " pod="openshift-apiserver/apiserver-76f77b778f-8fcp4" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.903607 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/42c80491-a1f7-4f1f-b2a0-db682fbc490e-client-ca\") pod \"route-controller-manager-6576b87f9c-rlkpg\" (UID: \"42c80491-a1f7-4f1f-b2a0-db682fbc490e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rlkpg" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.903634 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/d2c99872-a302-4599-96b7-2320051f069c-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-pkmj5\" (UID: \"d2c99872-a302-4599-96b7-2320051f069c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-pkmj5" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.903655 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/a1910aeb-3a24-4922-ada0-a47d871c0998-etcd-serving-ca\") pod \"apiserver-76f77b778f-8fcp4\" (UID: \"a1910aeb-3a24-4922-ada0-a47d871c0998\") " pod="openshift-apiserver/apiserver-76f77b778f-8fcp4" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.903692 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3a569b99-f211-4204-a526-314e066143c2-config\") pod \"controller-manager-879f6c89f-5mx4v\" (UID: \"3a569b99-f211-4204-a526-314e066143c2\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5mx4v" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.903716 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3a569b99-f211-4204-a526-314e066143c2-client-ca\") pod \"controller-manager-879f6c89f-5mx4v\" (UID: \"3a569b99-f211-4204-a526-314e066143c2\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5mx4v" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.903742 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xhlvk\" (UniqueName: \"kubernetes.io/projected/55a68700-c0f7-4ae5-a252-013c8fbab545-kube-api-access-xhlvk\") pod \"image-pruner-29401920-nz8v2\" (UID: \"55a68700-c0f7-4ae5-a252-013c8fbab545\") " pod="openshift-image-registry/image-pruner-29401920-nz8v2" Nov 26 00:09:26 
crc kubenswrapper[4875]: I1126 00:09:26.903766 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8f5px\" (UniqueName: \"kubernetes.io/projected/42c80491-a1f7-4f1f-b2a0-db682fbc490e-kube-api-access-8f5px\") pod \"route-controller-manager-6576b87f9c-rlkpg\" (UID: \"42c80491-a1f7-4f1f-b2a0-db682fbc490e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rlkpg" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.903801 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/d2c99872-a302-4599-96b7-2320051f069c-images\") pod \"machine-api-operator-5694c8668f-pkmj5\" (UID: \"d2c99872-a302-4599-96b7-2320051f069c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-pkmj5" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.903822 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a1910aeb-3a24-4922-ada0-a47d871c0998-serving-cert\") pod \"apiserver-76f77b778f-8fcp4\" (UID: \"a1910aeb-3a24-4922-ada0-a47d871c0998\") " pod="openshift-apiserver/apiserver-76f77b778f-8fcp4" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.903843 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3a569b99-f211-4204-a526-314e066143c2-serving-cert\") pod \"controller-manager-879f6c89f-5mx4v\" (UID: \"3a569b99-f211-4204-a526-314e066143c2\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5mx4v" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.903867 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a1910aeb-3a24-4922-ada0-a47d871c0998-trusted-ca-bundle\") pod \"apiserver-76f77b778f-8fcp4\" (UID: \"a1910aeb-3a24-4922-ada0-a47d871c0998\") " pod="openshift-apiserver/apiserver-76f77b778f-8fcp4" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.903888 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6nmnh\" (UniqueName: \"kubernetes.io/projected/3a569b99-f211-4204-a526-314e066143c2-kube-api-access-6nmnh\") pod \"controller-manager-879f6c89f-5mx4v\" (UID: \"3a569b99-f211-4204-a526-314e066143c2\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5mx4v" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.903910 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/a1910aeb-3a24-4922-ada0-a47d871c0998-image-import-ca\") pod \"apiserver-76f77b778f-8fcp4\" (UID: \"a1910aeb-3a24-4922-ada0-a47d871c0998\") " pod="openshift-apiserver/apiserver-76f77b778f-8fcp4" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.903932 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/a1910aeb-3a24-4922-ada0-a47d871c0998-audit\") pod \"apiserver-76f77b778f-8fcp4\" (UID: \"a1910aeb-3a24-4922-ada0-a47d871c0998\") " pod="openshift-apiserver/apiserver-76f77b778f-8fcp4" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.903953 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started 
for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/42c80491-a1f7-4f1f-b2a0-db682fbc490e-serving-cert\") pod \"route-controller-manager-6576b87f9c-rlkpg\" (UID: \"42c80491-a1f7-4f1f-b2a0-db682fbc490e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rlkpg" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.904401 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-w9htq" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.905774 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.905934 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.906033 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.906127 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.906184 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.906330 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.906364 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.906546 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.907120 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/downloads-7954f5f757-7s69q"] Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.907377 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.907582 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.907729 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/downloads-7954f5f757-7s69q" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.908220 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress/router-default-5444994796-bplfw"] Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.908651 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress/router-default-5444994796-bplfw" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.908747 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.909016 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.909102 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.909213 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.909296 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-54wbh"] Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.909331 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.909445 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.909774 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.909855 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-54wbh" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.910135 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.910266 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.910324 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.910385 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.910464 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.910636 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.910896 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.911004 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.911090 4875 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-authentication-operator"/"service-ca-bundle" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.911462 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gw6b8"] Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.911820 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gw6b8" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.912533 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.912708 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.912833 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.914958 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-8t466"] Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.915915 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-console/console-f9d7485db-l4gfb"] Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.916332 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-l4gfb" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.916639 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-8t466" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.916795 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xfnw9"] Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.924319 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.924744 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xfnw9" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.924764 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.925622 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.926437 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.926906 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.926939 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.927217 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.927983 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.928147 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.928291 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.928376 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.928441 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.928594 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.928738 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.930382 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.930650 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.931251 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.931399 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.931696 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.932025 4875 
reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.932260 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.933083 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.933226 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.933233 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.933358 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bbsj7"] Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.943557 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.943782 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.944319 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"kube-root-ca.crt" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.946566 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.947252 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.948365 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.948646 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.951558 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dbctw"] Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.951905 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dbctw" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.951974 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bbsj7" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.955718 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-62q8r"] Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.956287 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-gd8c4"] Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.956813 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-gd8c4" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.957088 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-62q8r" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.957297 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-qxqtr"] Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.957804 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-qxqtr" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.958891 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.959053 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.960882 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.961083 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-8kdf7"] Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.961094 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.961531 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-mpgjt"] Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.961920 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-8pktj"] Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.962304 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.962527 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mpgjt" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.962670 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-8kdf7" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.963078 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.963146 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.963695 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.965095 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t2hxd"] Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.965531 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.965753 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-6ml5v"] Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.966209 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-6ml5v" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.966574 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t2hxd" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.967585 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-server-nt2m7"] Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.967699 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.968245 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-nt2m7" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.969230 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.971968 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-pq492"] Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.972908 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-9r68s"] Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.973181 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-pq492" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.973678 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-9r68s" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.977304 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.980462 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-bsvcq"] Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.981279 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-rln4j"] Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.982254 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-rln4j" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.982603 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-bsvcq" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.985472 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fgpzs"] Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.997202 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-btrqx"] Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.997300 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fgpzs" Nov 26 00:09:26 crc kubenswrapper[4875]: I1126 00:09:26.998267 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.001156 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401920-pfkvn"] Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.002741 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-btrqx" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.002754 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401920-pfkvn" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.005888 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/715ac3a3-bef6-4122-a4b0-3ea80be8a7eb-serving-cert\") pod \"etcd-operator-b45778765-9r68s\" (UID: \"715ac3a3-bef6-4122-a4b0-3ea80be8a7eb\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9r68s" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.005932 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/baf3ba36-cf4a-4780-b954-913da501aab7-bound-sa-token\") pod \"ingress-operator-5b745b69d9-gd8c4\" (UID: \"baf3ba36-cf4a-4780-b954-913da501aab7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-gd8c4" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.005972 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/a1910aeb-3a24-4922-ada0-a47d871c0998-encryption-config\") pod \"apiserver-76f77b778f-8fcp4\" (UID: \"a1910aeb-3a24-4922-ada0-a47d871c0998\") " pod="openshift-apiserver/apiserver-76f77b778f-8fcp4" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.006155 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/b0ac0929-e6e0-464c-bb52-aadcea8d2f3e-proxy-tls\") pod \"machine-config-controller-84d6567774-mpgjt\" (UID: \"b0ac0929-e6e0-464c-bb52-aadcea8d2f3e\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mpgjt" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.006260 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/baf3ba36-cf4a-4780-b954-913da501aab7-trusted-ca\") pod \"ingress-operator-5b745b69d9-gd8c4\" (UID: \"baf3ba36-cf4a-4780-b954-913da501aab7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-gd8c4" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.006748 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a1910aeb-3a24-4922-ada0-a47d871c0998-audit-dir\") pod \"apiserver-76f77b778f-8fcp4\" (UID: \"a1910aeb-3a24-4922-ada0-a47d871c0998\") " pod="openshift-apiserver/apiserver-76f77b778f-8fcp4" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.006786 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5e0f9c3c-fb3f-463b-a69c-57fc40cf2f36-node-bootstrap-token\") pod \"machine-config-server-nt2m7\" (UID: \"5e0f9c3c-fb3f-463b-a69c-57fc40cf2f36\") " pod="openshift-machine-config-operator/machine-config-server-nt2m7" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.006809 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/42c80491-a1f7-4f1f-b2a0-db682fbc490e-client-ca\") pod \"route-controller-manager-6576b87f9c-rlkpg\" (UID: \"42c80491-a1f7-4f1f-b2a0-db682fbc490e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rlkpg" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.006829 4875 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/24b77217-7ebc-4165-a7b5-d7171adbc29a-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-8kdf7\" (UID: \"24b77217-7ebc-4165-a7b5-d7171adbc29a\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-8kdf7" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.006845 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/baf3ba36-cf4a-4780-b954-913da501aab7-metrics-tls\") pod \"ingress-operator-5b745b69d9-gd8c4\" (UID: \"baf3ba36-cf4a-4780-b954-913da501aab7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-gd8c4" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.006863 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1eeb4a6d-f2da-4006-aa3a-37f3adb4937b-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-gw6b8\" (UID: \"1eeb4a6d-f2da-4006-aa3a-37f3adb4937b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gw6b8" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.006887 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-59t7w\" (UniqueName: \"kubernetes.io/projected/715ac3a3-bef6-4122-a4b0-3ea80be8a7eb-kube-api-access-59t7w\") pod \"etcd-operator-b45778765-9r68s\" (UID: \"715ac3a3-bef6-4122-a4b0-3ea80be8a7eb\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9r68s" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.006906 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/2f69fc9c-de93-4cae-a491-50698ac10599-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-l4vms\" (UID: \"2f69fc9c-de93-4cae-a491-50698ac10599\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-l4vms" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.006982 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/a1910aeb-3a24-4922-ada0-a47d871c0998-audit-dir\") pod \"apiserver-76f77b778f-8fcp4\" (UID: \"a1910aeb-3a24-4922-ada0-a47d871c0998\") " pod="openshift-apiserver/apiserver-76f77b778f-8fcp4" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.006922 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/2f69fc9c-de93-4cae-a491-50698ac10599-encryption-config\") pod \"apiserver-7bbb656c7d-l4vms\" (UID: \"2f69fc9c-de93-4cae-a491-50698ac10599\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-l4vms" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.007482 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4xwpm\" (UniqueName: \"kubernetes.io/projected/baf3ba36-cf4a-4780-b954-913da501aab7-kube-api-access-4xwpm\") pod \"ingress-operator-5b745b69d9-gd8c4\" (UID: \"baf3ba36-cf4a-4780-b954-913da501aab7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-gd8c4" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.007513 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0c1975b8-52f8-4b58-a5f3-cc74156dcd75-webhook-cert\") pod \"packageserver-d55dfcdfc-t2hxd\" (UID: \"0c1975b8-52f8-4b58-a5f3-cc74156dcd75\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t2hxd" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.007537 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/d2c99872-a302-4599-96b7-2320051f069c-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-pkmj5\" (UID: \"d2c99872-a302-4599-96b7-2320051f069c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-pkmj5" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.007556 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/a1910aeb-3a24-4922-ada0-a47d871c0998-etcd-serving-ca\") pod \"apiserver-76f77b778f-8fcp4\" (UID: \"a1910aeb-3a24-4922-ada0-a47d871c0998\") " pod="openshift-apiserver/apiserver-76f77b778f-8fcp4" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.007575 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-v825q\" (UniqueName: \"kubernetes.io/projected/1da5021c-2176-41cb-9c3c-d4ccbe299664-kube-api-access-v825q\") pod \"migrator-59844c95c7-qxqtr\" (UID: \"1da5021c-2176-41cb-9c3c-d4ccbe299664\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-qxqtr" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.007592 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5e0f9c3c-fb3f-463b-a69c-57fc40cf2f36-certs\") pod \"machine-config-server-nt2m7\" (UID: \"5e0f9c3c-fb3f-463b-a69c-57fc40cf2f36\") " pod="openshift-machine-config-operator/machine-config-server-nt2m7" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.007612 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0dc6d7a2-494d-462d-8972-de55d1e36f31-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-54wbh\" (UID: \"0dc6d7a2-494d-462d-8972-de55d1e36f31\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-54wbh" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.007631 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/24b77217-7ebc-4165-a7b5-d7171adbc29a-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-8kdf7\" (UID: \"24b77217-7ebc-4165-a7b5-d7171adbc29a\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-8kdf7" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.007649 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/715ac3a3-bef6-4122-a4b0-3ea80be8a7eb-config\") pod \"etcd-operator-b45778765-9r68s\" (UID: \"715ac3a3-bef6-4122-a4b0-3ea80be8a7eb\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9r68s" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.007668 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: 
\"kubernetes.io/configmap/2f69fc9c-de93-4cae-a491-50698ac10599-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-l4vms\" (UID: \"2f69fc9c-de93-4cae-a491-50698ac10599\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-l4vms" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.007683 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1167d13e-be01-4849-a6e8-0b5f505a0caa-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-bbsj7\" (UID: \"1167d13e-be01-4849-a6e8-0b5f505a0caa\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bbsj7" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.007718 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8f3c50f0-6fff-4f1c-a0b6-f6756823da65-serving-cert\") pod \"authentication-operator-69f744f599-55rwl\" (UID: \"8f3c50f0-6fff-4f1c-a0b6-f6756823da65\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-55rwl" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.007736 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4349f16a-1b36-45b5-aa93-f982bcaa781f-trusted-ca-bundle\") pod \"console-f9d7485db-l4gfb\" (UID: \"4349f16a-1b36-45b5-aa93-f982bcaa781f\") " pod="openshift-console/console-f9d7485db-l4gfb" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.009106 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f415abb0-de0a-477d-96c0-31efea9964e2-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-mgkmk\" (UID: \"f415abb0-de0a-477d-96c0-31efea9964e2\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-mgkmk" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.009152 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9djpq\" (UniqueName: \"kubernetes.io/projected/b0ac0929-e6e0-464c-bb52-aadcea8d2f3e-kube-api-access-9djpq\") pod \"machine-config-controller-84d6567774-mpgjt\" (UID: \"b0ac0929-e6e0-464c-bb52-aadcea8d2f3e\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mpgjt" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.009180 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/0dc6d7a2-494d-462d-8972-de55d1e36f31-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-54wbh\" (UID: \"0dc6d7a2-494d-462d-8972-de55d1e36f31\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-54wbh" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.009211 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5cnhn\" (UniqueName: \"kubernetes.io/projected/986802f7-9a9e-4191-8545-0ccda0c2bcf5-kube-api-access-5cnhn\") pod \"control-plane-machine-set-operator-78cbb6b69f-rln4j\" (UID: \"986802f7-9a9e-4191-8545-0ccda0c2bcf5\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-rln4j" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.009242 4875 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/49bb8ae9-ab85-458a-850e-8d3b48ab42de-serving-cert\") pod \"openshift-config-operator-7777fb866f-zww4t\" (UID: \"49bb8ae9-ab85-458a-850e-8d3b48ab42de\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-zww4t" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.009263 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/28e618ae-43ac-4fc4-83c2-28462346e2a9-machine-approver-tls\") pod \"machine-approver-56656f9798-7dk2l\" (UID: \"28e618ae-43ac-4fc4-83c2-28462346e2a9\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7dk2l" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.009290 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3a569b99-f211-4204-a526-314e066143c2-config\") pod \"controller-manager-879f6c89f-5mx4v\" (UID: \"3a569b99-f211-4204-a526-314e066143c2\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5mx4v" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.009311 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3a569b99-f211-4204-a526-314e066143c2-client-ca\") pod \"controller-manager-879f6c89f-5mx4v\" (UID: \"3a569b99-f211-4204-a526-314e066143c2\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5mx4v" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.009333 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/b0ac0929-e6e0-464c-bb52-aadcea8d2f3e-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-mpgjt\" (UID: \"b0ac0929-e6e0-464c-bb52-aadcea8d2f3e\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mpgjt" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.009355 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/49bb8ae9-ab85-458a-850e-8d3b48ab42de-available-featuregates\") pod \"openshift-config-operator-7777fb866f-zww4t\" (UID: \"49bb8ae9-ab85-458a-850e-8d3b48ab42de\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-zww4t" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.009377 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xhlvk\" (UniqueName: \"kubernetes.io/projected/55a68700-c0f7-4ae5-a252-013c8fbab545-kube-api-access-xhlvk\") pod \"image-pruner-29401920-nz8v2\" (UID: \"55a68700-c0f7-4ae5-a252-013c8fbab545\") " pod="openshift-image-registry/image-pruner-29401920-nz8v2" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.009376 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/42c80491-a1f7-4f1f-b2a0-db682fbc490e-client-ca\") pod \"route-controller-manager-6576b87f9c-rlkpg\" (UID: \"42c80491-a1f7-4f1f-b2a0-db682fbc490e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rlkpg" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.009395 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for 
volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/4349f16a-1b36-45b5-aa93-f982bcaa781f-console-config\") pod \"console-f9d7485db-l4gfb\" (UID: \"4349f16a-1b36-45b5-aa93-f982bcaa781f\") " pod="openshift-console/console-f9d7485db-l4gfb" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.009492 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/0c1975b8-52f8-4b58-a5f3-cc74156dcd75-tmpfs\") pod \"packageserver-d55dfcdfc-t2hxd\" (UID: \"0c1975b8-52f8-4b58-a5f3-cc74156dcd75\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t2hxd" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.009860 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.010656 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3a569b99-f211-4204-a526-314e066143c2-client-ca\") pod \"controller-manager-879f6c89f-5mx4v\" (UID: \"3a569b99-f211-4204-a526-314e066143c2\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5mx4v" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.010846 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3a569b99-f211-4204-a526-314e066143c2-config\") pod \"controller-manager-879f6c89f-5mx4v\" (UID: \"3a569b99-f211-4204-a526-314e066143c2\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5mx4v" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.010955 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8f5px\" (UniqueName: \"kubernetes.io/projected/42c80491-a1f7-4f1f-b2a0-db682fbc490e-kube-api-access-8f5px\") pod \"route-controller-manager-6576b87f9c-rlkpg\" (UID: \"42c80491-a1f7-4f1f-b2a0-db682fbc490e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rlkpg" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.011026 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/d2c99872-a302-4599-96b7-2320051f069c-images\") pod \"machine-api-operator-5694c8668f-pkmj5\" (UID: \"d2c99872-a302-4599-96b7-2320051f069c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-pkmj5" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.011051 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a1910aeb-3a24-4922-ada0-a47d871c0998-serving-cert\") pod \"apiserver-76f77b778f-8fcp4\" (UID: \"a1910aeb-3a24-4922-ada0-a47d871c0998\") " pod="openshift-apiserver/apiserver-76f77b778f-8fcp4" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.011076 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3a569b99-f211-4204-a526-314e066143c2-serving-cert\") pod \"controller-manager-879f6c89f-5mx4v\" (UID: \"3a569b99-f211-4204-a526-314e066143c2\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5mx4v" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.011121 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/8f3c50f0-6fff-4f1c-a0b6-f6756823da65-config\") pod \"authentication-operator-69f744f599-55rwl\" (UID: \"8f3c50f0-6fff-4f1c-a0b6-f6756823da65\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-55rwl" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.011145 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xfdxj\" (UniqueName: \"kubernetes.io/projected/2f69fc9c-de93-4cae-a491-50698ac10599-kube-api-access-xfdxj\") pod \"apiserver-7bbb656c7d-l4vms\" (UID: \"2f69fc9c-de93-4cae-a491-50698ac10599\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-l4vms" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.011168 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1eeb4a6d-f2da-4006-aa3a-37f3adb4937b-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-gw6b8\" (UID: \"1eeb4a6d-f2da-4006-aa3a-37f3adb4937b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gw6b8" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.011196 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/83b8aa62-fa25-4831-8fba-c08bb0887d14-signing-cabundle\") pod \"service-ca-9c57cc56f-pq492\" (UID: \"83b8aa62-fa25-4831-8fba-c08bb0887d14\") " pod="openshift-service-ca/service-ca-9c57cc56f-pq492" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.011224 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/d04a012b-fcc1-4f75-8e0a-4a4a17d9cf75-profile-collector-cert\") pod \"catalog-operator-68c6474976-62q8r\" (UID: \"d04a012b-fcc1-4f75-8e0a-4a4a17d9cf75\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-62q8r" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.011251 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wkrv9\" (UniqueName: \"kubernetes.io/projected/4349f16a-1b36-45b5-aa93-f982bcaa781f-kube-api-access-wkrv9\") pod \"console-f9d7485db-l4gfb\" (UID: \"4349f16a-1b36-45b5-aa93-f982bcaa781f\") " pod="openshift-console/console-f9d7485db-l4gfb" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.011284 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/715ac3a3-bef6-4122-a4b0-3ea80be8a7eb-etcd-ca\") pod \"etcd-operator-b45778765-9r68s\" (UID: \"715ac3a3-bef6-4122-a4b0-3ea80be8a7eb\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9r68s" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.011326 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/715ac3a3-bef6-4122-a4b0-3ea80be8a7eb-etcd-client\") pod \"etcd-operator-b45778765-9r68s\" (UID: \"715ac3a3-bef6-4122-a4b0-3ea80be8a7eb\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9r68s" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.011353 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/c05d440a-597d-4b70-9408-2b3143be393d-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-fgpzs\" (UID: \"c05d440a-597d-4b70-9408-2b3143be393d\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fgpzs" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.011384 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/4349f16a-1b36-45b5-aa93-f982bcaa781f-oauth-serving-cert\") pod \"console-f9d7485db-l4gfb\" (UID: \"4349f16a-1b36-45b5-aa93-f982bcaa781f\") " pod="openshift-console/console-f9d7485db-l4gfb" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.011441 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a1910aeb-3a24-4922-ada0-a47d871c0998-trusted-ca-bundle\") pod \"apiserver-76f77b778f-8fcp4\" (UID: \"a1910aeb-3a24-4922-ada0-a47d871c0998\") " pod="openshift-apiserver/apiserver-76f77b778f-8fcp4" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.011468 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6nmnh\" (UniqueName: \"kubernetes.io/projected/3a569b99-f211-4204-a526-314e066143c2-kube-api-access-6nmnh\") pod \"controller-manager-879f6c89f-5mx4v\" (UID: \"3a569b99-f211-4204-a526-314e066143c2\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5mx4v" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.011501 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/24b77217-7ebc-4165-a7b5-d7171adbc29a-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-8kdf7\" (UID: \"24b77217-7ebc-4165-a7b5-d7171adbc29a\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-8kdf7" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.011538 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/a1910aeb-3a24-4922-ada0-a47d871c0998-etcd-serving-ca\") pod \"apiserver-76f77b778f-8fcp4\" (UID: \"a1910aeb-3a24-4922-ada0-a47d871c0998\") " pod="openshift-apiserver/apiserver-76f77b778f-8fcp4" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.011544 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-knmtn\" (UniqueName: \"kubernetes.io/projected/83b8aa62-fa25-4831-8fba-c08bb0887d14-kube-api-access-knmtn\") pod \"service-ca-9c57cc56f-pq492\" (UID: \"83b8aa62-fa25-4831-8fba-c08bb0887d14\") " pod="openshift-service-ca/service-ca-9c57cc56f-pq492" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.011626 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b226d68e-dbae-401a-88d4-ccc0cb1a51cf-metrics-certs\") pod \"router-default-5444994796-bplfw\" (UID: \"b226d68e-dbae-401a-88d4-ccc0cb1a51cf\") " pod="openshift-ingress/router-default-5444994796-bplfw" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.011656 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/4349f16a-1b36-45b5-aa93-f982bcaa781f-console-oauth-config\") pod \"console-f9d7485db-l4gfb\" 
(UID: \"4349f16a-1b36-45b5-aa93-f982bcaa781f\") " pod="openshift-console/console-f9d7485db-l4gfb" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.011685 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rzlfd\" (UniqueName: \"kubernetes.io/projected/f415abb0-de0a-477d-96c0-31efea9964e2-kube-api-access-rzlfd\") pod \"openshift-apiserver-operator-796bbdcf4f-mgkmk\" (UID: \"f415abb0-de0a-477d-96c0-31efea9964e2\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-mgkmk" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.011718 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/a1910aeb-3a24-4922-ada0-a47d871c0998-image-import-ca\") pod \"apiserver-76f77b778f-8fcp4\" (UID: \"a1910aeb-3a24-4922-ada0-a47d871c0998\") " pod="openshift-apiserver/apiserver-76f77b778f-8fcp4" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.011749 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/2f69fc9c-de93-4cae-a491-50698ac10599-etcd-client\") pod \"apiserver-7bbb656c7d-l4vms\" (UID: \"2f69fc9c-de93-4cae-a491-50698ac10599\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-l4vms" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.011775 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d752aecd-9a39-430c-9262-c81434b3f08a-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-xfnw9\" (UID: \"d752aecd-9a39-430c-9262-c81434b3f08a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xfnw9" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.011801 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/a1910aeb-3a24-4922-ada0-a47d871c0998-audit\") pod \"apiserver-76f77b778f-8fcp4\" (UID: \"a1910aeb-3a24-4922-ada0-a47d871c0998\") " pod="openshift-apiserver/apiserver-76f77b778f-8fcp4" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.011824 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/42c80491-a1f7-4f1f-b2a0-db682fbc490e-serving-cert\") pod \"route-controller-manager-6576b87f9c-rlkpg\" (UID: \"42c80491-a1f7-4f1f-b2a0-db682fbc490e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rlkpg" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.011850 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f415abb0-de0a-477d-96c0-31efea9964e2-config\") pod \"openshift-apiserver-operator-796bbdcf4f-mgkmk\" (UID: \"f415abb0-de0a-477d-96c0-31efea9964e2\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-mgkmk" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.011878 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/55a68700-c0f7-4ae5-a252-013c8fbab545-serviceca\") pod \"image-pruner-29401920-nz8v2\" (UID: \"55a68700-c0f7-4ae5-a252-013c8fbab545\") " pod="openshift-image-registry/image-pruner-29401920-nz8v2" Nov 26 00:09:27 crc 
kubenswrapper[4875]: I1126 00:09:27.011903 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/2f69fc9c-de93-4cae-a491-50698ac10599-audit-policies\") pod \"apiserver-7bbb656c7d-l4vms\" (UID: \"2f69fc9c-de93-4cae-a491-50698ac10599\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-l4vms" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.011927 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/b226d68e-dbae-401a-88d4-ccc0cb1a51cf-stats-auth\") pod \"router-default-5444994796-bplfw\" (UID: \"b226d68e-dbae-401a-88d4-ccc0cb1a51cf\") " pod="openshift-ingress/router-default-5444994796-bplfw" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.011951 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d752aecd-9a39-430c-9262-c81434b3f08a-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-xfnw9\" (UID: \"d752aecd-9a39-430c-9262-c81434b3f08a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xfnw9" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.011981 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/83b8aa62-fa25-4831-8fba-c08bb0887d14-signing-key\") pod \"service-ca-9c57cc56f-pq492\" (UID: \"83b8aa62-fa25-4831-8fba-c08bb0887d14\") " pod="openshift-service-ca/service-ca-9c57cc56f-pq492" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.012006 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8f3c50f0-6fff-4f1c-a0b6-f6756823da65-service-ca-bundle\") pod \"authentication-operator-69f744f599-55rwl\" (UID: \"8f3c50f0-6fff-4f1c-a0b6-f6756823da65\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-55rwl" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.012033 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-md7bb\" (UniqueName: \"kubernetes.io/projected/d2c99872-a302-4599-96b7-2320051f069c-kube-api-access-md7bb\") pod \"machine-api-operator-5694c8668f-pkmj5\" (UID: \"d2c99872-a302-4599-96b7-2320051f069c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-pkmj5" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.012055 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f5k8q\" (UniqueName: \"kubernetes.io/projected/a1910aeb-3a24-4922-ada0-a47d871c0998-kube-api-access-f5k8q\") pod \"apiserver-76f77b778f-8fcp4\" (UID: \"a1910aeb-3a24-4922-ada0-a47d871c0998\") " pod="openshift-apiserver/apiserver-76f77b778f-8fcp4" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.012077 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/d04a012b-fcc1-4f75-8e0a-4a4a17d9cf75-srv-cert\") pod \"catalog-operator-68c6474976-62q8r\" (UID: \"d04a012b-fcc1-4f75-8e0a-4a4a17d9cf75\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-62q8r" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.012105 4875 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pqcsj\" (UniqueName: \"kubernetes.io/projected/28e618ae-43ac-4fc4-83c2-28462346e2a9-kube-api-access-pqcsj\") pod \"machine-approver-56656f9798-7dk2l\" (UID: \"28e618ae-43ac-4fc4-83c2-28462346e2a9\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7dk2l" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.012140 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/85ee0261-b699-4159-9ee7-123628b1f1c0-images\") pod \"machine-config-operator-74547568cd-bsvcq\" (UID: \"85ee0261-b699-4159-9ee7-123628b1f1c0\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-bsvcq" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.012170 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tzzsx\" (UniqueName: \"kubernetes.io/projected/0c1975b8-52f8-4b58-a5f3-cc74156dcd75-kube-api-access-tzzsx\") pod \"packageserver-d55dfcdfc-t2hxd\" (UID: \"0c1975b8-52f8-4b58-a5f3-cc74156dcd75\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t2hxd" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.012224 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/85ee0261-b699-4159-9ee7-123628b1f1c0-proxy-tls\") pod \"machine-config-operator-74547568cd-bsvcq\" (UID: \"85ee0261-b699-4159-9ee7-123628b1f1c0\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-bsvcq" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.012252 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/715ac3a3-bef6-4122-a4b0-3ea80be8a7eb-etcd-service-ca\") pod \"etcd-operator-b45778765-9r68s\" (UID: \"715ac3a3-bef6-4122-a4b0-3ea80be8a7eb\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9r68s" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.012293 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2f69fc9c-de93-4cae-a491-50698ac10599-serving-cert\") pod \"apiserver-7bbb656c7d-l4vms\" (UID: \"2f69fc9c-de93-4cae-a491-50698ac10599\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-l4vms" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.012317 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0dc6d7a2-494d-462d-8972-de55d1e36f31-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-54wbh\" (UID: \"0dc6d7a2-494d-462d-8972-de55d1e36f31\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-54wbh" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.012342 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/28e618ae-43ac-4fc4-83c2-28462346e2a9-config\") pod \"machine-approver-56656f9798-7dk2l\" (UID: \"28e618ae-43ac-4fc4-83c2-28462346e2a9\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7dk2l" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.012368 4875 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/4349f16a-1b36-45b5-aa93-f982bcaa781f-console-serving-cert\") pod \"console-f9d7485db-l4gfb\" (UID: \"4349f16a-1b36-45b5-aa93-f982bcaa781f\") " pod="openshift-console/console-f9d7485db-l4gfb" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.012393 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4349f16a-1b36-45b5-aa93-f982bcaa781f-service-ca\") pod \"console-f9d7485db-l4gfb\" (UID: \"4349f16a-1b36-45b5-aa93-f982bcaa781f\") " pod="openshift-console/console-f9d7485db-l4gfb" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.012435 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/85ee0261-b699-4159-9ee7-123628b1f1c0-auth-proxy-config\") pod \"machine-config-operator-74547568cd-bsvcq\" (UID: \"85ee0261-b699-4159-9ee7-123628b1f1c0\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-bsvcq" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.012463 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-76jc9\" (UniqueName: \"kubernetes.io/projected/85ee0261-b699-4159-9ee7-123628b1f1c0-kube-api-access-76jc9\") pod \"machine-config-operator-74547568cd-bsvcq\" (UID: \"85ee0261-b699-4159-9ee7-123628b1f1c0\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-bsvcq" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.012492 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1167d13e-be01-4849-a6e8-0b5f505a0caa-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-bbsj7\" (UID: \"1167d13e-be01-4849-a6e8-0b5f505a0caa\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bbsj7" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.012520 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hvm5c\" (UniqueName: \"kubernetes.io/projected/8f3c50f0-6fff-4f1c-a0b6-f6756823da65-kube-api-access-hvm5c\") pod \"authentication-operator-69f744f599-55rwl\" (UID: \"8f3c50f0-6fff-4f1c-a0b6-f6756823da65\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-55rwl" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.012542 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2n24j\" (UniqueName: \"kubernetes.io/projected/d04a012b-fcc1-4f75-8e0a-4a4a17d9cf75-kube-api-access-2n24j\") pod \"catalog-operator-68c6474976-62q8r\" (UID: \"d04a012b-fcc1-4f75-8e0a-4a4a17d9cf75\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-62q8r" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.012567 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dzsn6\" (UniqueName: \"kubernetes.io/projected/0dc6d7a2-494d-462d-8972-de55d1e36f31-kube-api-access-dzsn6\") pod \"cluster-image-registry-operator-dc59b4c8b-54wbh\" (UID: \"0dc6d7a2-494d-462d-8972-de55d1e36f31\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-54wbh" Nov 26 00:09:27 
crc kubenswrapper[4875]: I1126 00:09:27.012592 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jql29\" (UniqueName: \"kubernetes.io/projected/c05d440a-597d-4b70-9408-2b3143be393d-kube-api-access-jql29\") pod \"package-server-manager-789f6589d5-fgpzs\" (UID: \"c05d440a-597d-4b70-9408-2b3143be393d\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fgpzs" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.012628 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bm8dl\" (UniqueName: \"kubernetes.io/projected/d752aecd-9a39-430c-9262-c81434b3f08a-kube-api-access-bm8dl\") pod \"kube-storage-version-migrator-operator-b67b599dd-xfnw9\" (UID: \"d752aecd-9a39-430c-9262-c81434b3f08a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xfnw9" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.012666 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w7xd4\" (UniqueName: \"kubernetes.io/projected/5e0f9c3c-fb3f-463b-a69c-57fc40cf2f36-kube-api-access-w7xd4\") pod \"machine-config-server-nt2m7\" (UID: \"5e0f9c3c-fb3f-463b-a69c-57fc40cf2f36\") " pod="openshift-machine-config-operator/machine-config-server-nt2m7" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.012690 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-crbdp\" (UniqueName: \"kubernetes.io/projected/b226d68e-dbae-401a-88d4-ccc0cb1a51cf-kube-api-access-crbdp\") pod \"router-default-5444994796-bplfw\" (UID: \"b226d68e-dbae-401a-88d4-ccc0cb1a51cf\") " pod="openshift-ingress/router-default-5444994796-bplfw" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.012716 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9pgq7\" (UniqueName: \"kubernetes.io/projected/49bb8ae9-ab85-458a-850e-8d3b48ab42de-kube-api-access-9pgq7\") pod \"openshift-config-operator-7777fb866f-zww4t\" (UID: \"49bb8ae9-ab85-458a-850e-8d3b48ab42de\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-zww4t" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.012746 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-2dj2t\" (UniqueName: \"kubernetes.io/projected/b118a730-fb77-4632-83f3-9b94b4d52b2c-kube-api-access-2dj2t\") pod \"downloads-7954f5f757-7s69q\" (UID: \"b118a730-fb77-4632-83f3-9b94b4d52b2c\") " pod="openshift-console/downloads-7954f5f757-7s69q" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.012618 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/d2c99872-a302-4599-96b7-2320051f069c-images\") pod \"machine-api-operator-5694c8668f-pkmj5\" (UID: \"d2c99872-a302-4599-96b7-2320051f069c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-pkmj5" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.014199 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/a1910aeb-3a24-4922-ada0-a47d871c0998-trusted-ca-bundle\") pod \"apiserver-76f77b778f-8fcp4\" (UID: \"a1910aeb-3a24-4922-ada0-a47d871c0998\") " pod="openshift-apiserver/apiserver-76f77b778f-8fcp4" Nov 26 00:09:27 crc 
kubenswrapper[4875]: I1126 00:09:27.015189 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit\" (UniqueName: \"kubernetes.io/configmap/a1910aeb-3a24-4922-ada0-a47d871c0998-audit\") pod \"apiserver-76f77b778f-8fcp4\" (UID: \"a1910aeb-3a24-4922-ada0-a47d871c0998\") " pod="openshift-apiserver/apiserver-76f77b778f-8fcp4" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.016227 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-import-ca\" (UniqueName: \"kubernetes.io/configmap/a1910aeb-3a24-4922-ada0-a47d871c0998-image-import-ca\") pod \"apiserver-76f77b778f-8fcp4\" (UID: \"a1910aeb-3a24-4922-ada0-a47d871c0998\") " pod="openshift-apiserver/apiserver-76f77b778f-8fcp4" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.016284 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-tkvbn"] Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.016748 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/55a68700-c0f7-4ae5-a252-013c8fbab545-serviceca\") pod \"image-pruner-29401920-nz8v2\" (UID: \"55a68700-c0f7-4ae5-a252-013c8fbab545\") " pod="openshift-image-registry/image-pruner-29401920-nz8v2" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.016908 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/b226d68e-dbae-401a-88d4-ccc0cb1a51cf-default-certificate\") pod \"router-default-5444994796-bplfw\" (UID: \"b226d68e-dbae-401a-88d4-ccc0cb1a51cf\") " pod="openshift-ingress/router-default-5444994796-bplfw" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.016972 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8f3c50f0-6fff-4f1c-a0b6-f6756823da65-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-55rwl\" (UID: \"8f3c50f0-6fff-4f1c-a0b6-f6756823da65\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-55rwl" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.017055 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1910aeb-3a24-4922-ada0-a47d871c0998-config\") pod \"apiserver-76f77b778f-8fcp4\" (UID: \"a1910aeb-3a24-4922-ada0-a47d871c0998\") " pod="openshift-apiserver/apiserver-76f77b778f-8fcp4" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.017121 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wfg4q\" (UniqueName: \"kubernetes.io/projected/1167d13e-be01-4849-a6e8-0b5f505a0caa-kube-api-access-wfg4q\") pod \"openshift-controller-manager-operator-756b6f6bc6-bbsj7\" (UID: \"1167d13e-be01-4849-a6e8-0b5f505a0caa\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bbsj7" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.017161 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/986802f7-9a9e-4191-8545-0ccda0c2bcf5-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-rln4j\" (UID: \"986802f7-9a9e-4191-8545-0ccda0c2bcf5\") " 
pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-rln4j" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.018218 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/a1910aeb-3a24-4922-ada0-a47d871c0998-config\") pod \"apiserver-76f77b778f-8fcp4\" (UID: \"a1910aeb-3a24-4922-ada0-a47d871c0998\") " pod="openshift-apiserver/apiserver-76f77b778f-8fcp4" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.018291 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/42c80491-a1f7-4f1f-b2a0-db682fbc490e-serving-cert\") pod \"route-controller-manager-6576b87f9c-rlkpg\" (UID: \"42c80491-a1f7-4f1f-b2a0-db682fbc490e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rlkpg" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.018544 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3a569b99-f211-4204-a526-314e066143c2-serving-cert\") pod \"controller-manager-879f6c89f-5mx4v\" (UID: \"3a569b99-f211-4204-a526-314e066143c2\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5mx4v" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.018897 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/a1910aeb-3a24-4922-ada0-a47d871c0998-serving-cert\") pod \"apiserver-76f77b778f-8fcp4\" (UID: \"a1910aeb-3a24-4922-ada0-a47d871c0998\") " pod="openshift-apiserver/apiserver-76f77b778f-8fcp4" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.030004 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-tkvbn" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.030508 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b226d68e-dbae-401a-88d4-ccc0cb1a51cf-service-ca-bundle\") pod \"router-default-5444994796-bplfw\" (UID: \"b226d68e-dbae-401a-88d4-ccc0cb1a51cf\") " pod="openshift-ingress/router-default-5444994796-bplfw" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.030553 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/28e618ae-43ac-4fc4-83c2-28462346e2a9-auth-proxy-config\") pod \"machine-approver-56656f9798-7dk2l\" (UID: \"28e618ae-43ac-4fc4-83c2-28462346e2a9\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7dk2l" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.030621 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/a1910aeb-3a24-4922-ada0-a47d871c0998-node-pullsecrets\") pod \"apiserver-76f77b778f-8fcp4\" (UID: \"a1910aeb-3a24-4922-ada0-a47d871c0998\") " pod="openshift-apiserver/apiserver-76f77b778f-8fcp4" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.030646 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0c1975b8-52f8-4b58-a5f3-cc74156dcd75-apiservice-cert\") pod \"packageserver-d55dfcdfc-t2hxd\" (UID: \"0c1975b8-52f8-4b58-a5f3-cc74156dcd75\") " 
pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t2hxd" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.030672 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1eeb4a6d-f2da-4006-aa3a-37f3adb4937b-config\") pod \"kube-controller-manager-operator-78b949d7b-gw6b8\" (UID: \"1eeb4a6d-f2da-4006-aa3a-37f3adb4937b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gw6b8" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.030693 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/a1910aeb-3a24-4922-ada0-a47d871c0998-etcd-client\") pod \"apiserver-76f77b778f-8fcp4\" (UID: \"a1910aeb-3a24-4922-ada0-a47d871c0998\") " pod="openshift-apiserver/apiserver-76f77b778f-8fcp4" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.030709 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2c99872-a302-4599-96b7-2320051f069c-config\") pod \"machine-api-operator-5694c8668f-pkmj5\" (UID: \"d2c99872-a302-4599-96b7-2320051f069c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-pkmj5" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.030726 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/42c80491-a1f7-4f1f-b2a0-db682fbc490e-config\") pod \"route-controller-manager-6576b87f9c-rlkpg\" (UID: \"42c80491-a1f7-4f1f-b2a0-db682fbc490e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rlkpg" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.030741 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/3a569b99-f211-4204-a526-314e066143c2-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-5mx4v\" (UID: \"3a569b99-f211-4204-a526-314e066143c2\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5mx4v" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.030760 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/2f69fc9c-de93-4cae-a491-50698ac10599-audit-dir\") pod \"apiserver-7bbb656c7d-l4vms\" (UID: \"2f69fc9c-de93-4cae-a491-50698ac10599\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-l4vms" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.030915 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/a1910aeb-3a24-4922-ada0-a47d871c0998-node-pullsecrets\") pod \"apiserver-76f77b778f-8fcp4\" (UID: \"a1910aeb-3a24-4922-ada0-a47d871c0998\") " pod="openshift-apiserver/apiserver-76f77b778f-8fcp4" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.031816 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-api-operator-tls\" (UniqueName: \"kubernetes.io/secret/d2c99872-a302-4599-96b7-2320051f069c-machine-api-operator-tls\") pod \"machine-api-operator-5694c8668f-pkmj5\" (UID: \"d2c99872-a302-4599-96b7-2320051f069c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-pkmj5" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.032649 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/42c80491-a1f7-4f1f-b2a0-db682fbc490e-config\") pod \"route-controller-manager-6576b87f9c-rlkpg\" (UID: \"42c80491-a1f7-4f1f-b2a0-db682fbc490e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rlkpg" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.033204 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d2c99872-a302-4599-96b7-2320051f069c-config\") pod \"machine-api-operator-5694c8668f-pkmj5\" (UID: \"d2c99872-a302-4599-96b7-2320051f069c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-pkmj5" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.033245 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-rlkpg"] Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.033652 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-mgkmk"] Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.034177 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/3a569b99-f211-4204-a526-314e066143c2-proxy-ca-bundles\") pod \"controller-manager-879f6c89f-5mx4v\" (UID: \"3a569b99-f211-4204-a526-314e066143c2\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5mx4v" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.035101 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/a1910aeb-3a24-4922-ada0-a47d871c0998-etcd-client\") pod \"apiserver-76f77b778f-8fcp4\" (UID: \"a1910aeb-3a24-4922-ada0-a47d871c0998\") " pod="openshift-apiserver/apiserver-76f77b778f-8fcp4" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.038633 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.049806 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/a1910aeb-3a24-4922-ada0-a47d871c0998-encryption-config\") pod \"apiserver-76f77b778f-8fcp4\" (UID: \"a1910aeb-3a24-4922-ada0-a47d871c0998\") " pod="openshift-apiserver/apiserver-76f77b778f-8fcp4" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.050800 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-tfknb"] Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.051895 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-l4vms"] Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.051930 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-8fcp4"] Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.051939 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-pkmj5"] Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.051950 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-pruner-29401920-nz8v2"] Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.052019 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-tfknb" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.052332 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.056385 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-55rwl"] Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.058069 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-dgkwj"] Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.059541 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-7s69q"] Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.060497 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-v9znd"] Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.062364 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-8t466"] Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.064967 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bbsj7"] Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.065015 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-62q8r"] Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.066976 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-zww4t"] Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.067780 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.067929 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-5mx4v"] Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.068372 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-qxqtr"] Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.069562 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-w9htq"] Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.070454 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-8kdf7"] Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.071452 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-6ml5v"] Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.072612 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-8pktj"] Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.074952 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-wlmp9"] Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.077614 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-dns/dns-default-j5qr7"] Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.078136 4875 util.go:30] "No sandbox for pod 
can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-wlmp9" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.084021 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-gd8c4"] Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.084057 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gw6b8"] Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.084067 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fgpzs"] Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.084077 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-9r68s"] Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.084145 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-j5qr7" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.086482 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-54wbh"] Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.087481 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.089488 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-mpgjt"] Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.089897 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-rln4j"] Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.091381 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-l4gfb"] Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.093351 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-pq492"] Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.094114 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xfnw9"] Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.094931 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-bsvcq"] Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.096069 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dbctw"] Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.097512 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t2hxd"] Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.098402 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-wlmp9"] Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.099906 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401920-pfkvn"] Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.100929 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-tkvbn"] Nov 26 
00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.102280 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-j5qr7"] Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.103432 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-tfknb"] Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.104434 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-btrqx"] Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.105471 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ingress-canary/ingress-canary-d5dcs"] Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.106073 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-d5dcs" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.106685 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-d5dcs"] Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.107296 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.128047 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.131312 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/4349f16a-1b36-45b5-aa93-f982bcaa781f-oauth-serving-cert\") pod \"console-f9d7485db-l4gfb\" (UID: \"4349f16a-1b36-45b5-aa93-f982bcaa781f\") " pod="openshift-console/console-f9d7485db-l4gfb" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.131341 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wkrv9\" (UniqueName: \"kubernetes.io/projected/4349f16a-1b36-45b5-aa93-f982bcaa781f-kube-api-access-wkrv9\") pod \"console-f9d7485db-l4gfb\" (UID: \"4349f16a-1b36-45b5-aa93-f982bcaa781f\") " pod="openshift-console/console-f9d7485db-l4gfb" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.131363 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/715ac3a3-bef6-4122-a4b0-3ea80be8a7eb-etcd-ca\") pod \"etcd-operator-b45778765-9r68s\" (UID: \"715ac3a3-bef6-4122-a4b0-3ea80be8a7eb\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9r68s" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.131428 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/715ac3a3-bef6-4122-a4b0-3ea80be8a7eb-etcd-client\") pod \"etcd-operator-b45778765-9r68s\" (UID: \"715ac3a3-bef6-4122-a4b0-3ea80be8a7eb\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9r68s" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.131448 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/c05d440a-597d-4b70-9408-2b3143be393d-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-fgpzs\" (UID: \"c05d440a-597d-4b70-9408-2b3143be393d\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fgpzs" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.131486 4875 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/24b77217-7ebc-4165-a7b5-d7171adbc29a-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-8kdf7\" (UID: \"24b77217-7ebc-4165-a7b5-d7171adbc29a\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-8kdf7" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.131502 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-knmtn\" (UniqueName: \"kubernetes.io/projected/83b8aa62-fa25-4831-8fba-c08bb0887d14-kube-api-access-knmtn\") pod \"service-ca-9c57cc56f-pq492\" (UID: \"83b8aa62-fa25-4831-8fba-c08bb0887d14\") " pod="openshift-service-ca/service-ca-9c57cc56f-pq492" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.131517 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b226d68e-dbae-401a-88d4-ccc0cb1a51cf-metrics-certs\") pod \"router-default-5444994796-bplfw\" (UID: \"b226d68e-dbae-401a-88d4-ccc0cb1a51cf\") " pod="openshift-ingress/router-default-5444994796-bplfw" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.131533 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/4349f16a-1b36-45b5-aa93-f982bcaa781f-console-oauth-config\") pod \"console-f9d7485db-l4gfb\" (UID: \"4349f16a-1b36-45b5-aa93-f982bcaa781f\") " pod="openshift-console/console-f9d7485db-l4gfb" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.131550 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rzlfd\" (UniqueName: \"kubernetes.io/projected/f415abb0-de0a-477d-96c0-31efea9964e2-kube-api-access-rzlfd\") pod \"openshift-apiserver-operator-796bbdcf4f-mgkmk\" (UID: \"f415abb0-de0a-477d-96c0-31efea9964e2\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-mgkmk" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.131570 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/2f69fc9c-de93-4cae-a491-50698ac10599-etcd-client\") pod \"apiserver-7bbb656c7d-l4vms\" (UID: \"2f69fc9c-de93-4cae-a491-50698ac10599\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-l4vms" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.131585 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d752aecd-9a39-430c-9262-c81434b3f08a-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-xfnw9\" (UID: \"d752aecd-9a39-430c-9262-c81434b3f08a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xfnw9" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.131599 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/f415abb0-de0a-477d-96c0-31efea9964e2-config\") pod \"openshift-apiserver-operator-796bbdcf4f-mgkmk\" (UID: \"f415abb0-de0a-477d-96c0-31efea9964e2\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-mgkmk" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.131614 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: 
\"kubernetes.io/configmap/2f69fc9c-de93-4cae-a491-50698ac10599-audit-policies\") pod \"apiserver-7bbb656c7d-l4vms\" (UID: \"2f69fc9c-de93-4cae-a491-50698ac10599\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-l4vms" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.131681 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/b226d68e-dbae-401a-88d4-ccc0cb1a51cf-stats-auth\") pod \"router-default-5444994796-bplfw\" (UID: \"b226d68e-dbae-401a-88d4-ccc0cb1a51cf\") " pod="openshift-ingress/router-default-5444994796-bplfw" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.131698 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d752aecd-9a39-430c-9262-c81434b3f08a-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-xfnw9\" (UID: \"d752aecd-9a39-430c-9262-c81434b3f08a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xfnw9" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.131721 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8f3c50f0-6fff-4f1c-a0b6-f6756823da65-service-ca-bundle\") pod \"authentication-operator-69f744f599-55rwl\" (UID: \"8f3c50f0-6fff-4f1c-a0b6-f6756823da65\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-55rwl" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.131738 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/83b8aa62-fa25-4831-8fba-c08bb0887d14-signing-key\") pod \"service-ca-9c57cc56f-pq492\" (UID: \"83b8aa62-fa25-4831-8fba-c08bb0887d14\") " pod="openshift-service-ca/service-ca-9c57cc56f-pq492" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.131766 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/d04a012b-fcc1-4f75-8e0a-4a4a17d9cf75-srv-cert\") pod \"catalog-operator-68c6474976-62q8r\" (UID: \"d04a012b-fcc1-4f75-8e0a-4a4a17d9cf75\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-62q8r" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.131783 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pqcsj\" (UniqueName: \"kubernetes.io/projected/28e618ae-43ac-4fc4-83c2-28462346e2a9-kube-api-access-pqcsj\") pod \"machine-approver-56656f9798-7dk2l\" (UID: \"28e618ae-43ac-4fc4-83c2-28462346e2a9\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7dk2l" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.131817 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/85ee0261-b699-4159-9ee7-123628b1f1c0-images\") pod \"machine-config-operator-74547568cd-bsvcq\" (UID: \"85ee0261-b699-4159-9ee7-123628b1f1c0\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-bsvcq" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.131835 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tzzsx\" (UniqueName: \"kubernetes.io/projected/0c1975b8-52f8-4b58-a5f3-cc74156dcd75-kube-api-access-tzzsx\") pod \"packageserver-d55dfcdfc-t2hxd\" (UID: \"0c1975b8-52f8-4b58-a5f3-cc74156dcd75\") " 
pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t2hxd" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.131858 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/85ee0261-b699-4159-9ee7-123628b1f1c0-proxy-tls\") pod \"machine-config-operator-74547568cd-bsvcq\" (UID: \"85ee0261-b699-4159-9ee7-123628b1f1c0\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-bsvcq" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.131873 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/715ac3a3-bef6-4122-a4b0-3ea80be8a7eb-etcd-service-ca\") pod \"etcd-operator-b45778765-9r68s\" (UID: \"715ac3a3-bef6-4122-a4b0-3ea80be8a7eb\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9r68s" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.131887 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2f69fc9c-de93-4cae-a491-50698ac10599-serving-cert\") pod \"apiserver-7bbb656c7d-l4vms\" (UID: \"2f69fc9c-de93-4cae-a491-50698ac10599\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-l4vms" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.131905 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0dc6d7a2-494d-462d-8972-de55d1e36f31-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-54wbh\" (UID: \"0dc6d7a2-494d-462d-8972-de55d1e36f31\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-54wbh" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.131926 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/28e618ae-43ac-4fc4-83c2-28462346e2a9-config\") pod \"machine-approver-56656f9798-7dk2l\" (UID: \"28e618ae-43ac-4fc4-83c2-28462346e2a9\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7dk2l" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.131949 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hvm5c\" (UniqueName: \"kubernetes.io/projected/8f3c50f0-6fff-4f1c-a0b6-f6756823da65-kube-api-access-hvm5c\") pod \"authentication-operator-69f744f599-55rwl\" (UID: \"8f3c50f0-6fff-4f1c-a0b6-f6756823da65\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-55rwl" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.131973 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/4349f16a-1b36-45b5-aa93-f982bcaa781f-console-serving-cert\") pod \"console-f9d7485db-l4gfb\" (UID: \"4349f16a-1b36-45b5-aa93-f982bcaa781f\") " pod="openshift-console/console-f9d7485db-l4gfb" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.131995 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4349f16a-1b36-45b5-aa93-f982bcaa781f-service-ca\") pod \"console-f9d7485db-l4gfb\" (UID: \"4349f16a-1b36-45b5-aa93-f982bcaa781f\") " pod="openshift-console/console-f9d7485db-l4gfb" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.132016 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: 
\"kubernetes.io/configmap/85ee0261-b699-4159-9ee7-123628b1f1c0-auth-proxy-config\") pod \"machine-config-operator-74547568cd-bsvcq\" (UID: \"85ee0261-b699-4159-9ee7-123628b1f1c0\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-bsvcq" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.132071 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-76jc9\" (UniqueName: \"kubernetes.io/projected/85ee0261-b699-4159-9ee7-123628b1f1c0-kube-api-access-76jc9\") pod \"machine-config-operator-74547568cd-bsvcq\" (UID: \"85ee0261-b699-4159-9ee7-123628b1f1c0\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-bsvcq" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.132165 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1167d13e-be01-4849-a6e8-0b5f505a0caa-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-bbsj7\" (UID: \"1167d13e-be01-4849-a6e8-0b5f505a0caa\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bbsj7" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.132255 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-2n24j\" (UniqueName: \"kubernetes.io/projected/d04a012b-fcc1-4f75-8e0a-4a4a17d9cf75-kube-api-access-2n24j\") pod \"catalog-operator-68c6474976-62q8r\" (UID: \"d04a012b-fcc1-4f75-8e0a-4a4a17d9cf75\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-62q8r" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.132282 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dzsn6\" (UniqueName: \"kubernetes.io/projected/0dc6d7a2-494d-462d-8972-de55d1e36f31-kube-api-access-dzsn6\") pod \"cluster-image-registry-operator-dc59b4c8b-54wbh\" (UID: \"0dc6d7a2-494d-462d-8972-de55d1e36f31\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-54wbh" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.132301 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jql29\" (UniqueName: \"kubernetes.io/projected/c05d440a-597d-4b70-9408-2b3143be393d-kube-api-access-jql29\") pod \"package-server-manager-789f6589d5-fgpzs\" (UID: \"c05d440a-597d-4b70-9408-2b3143be393d\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fgpzs" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.132321 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w7xd4\" (UniqueName: \"kubernetes.io/projected/5e0f9c3c-fb3f-463b-a69c-57fc40cf2f36-kube-api-access-w7xd4\") pod \"machine-config-server-nt2m7\" (UID: \"5e0f9c3c-fb3f-463b-a69c-57fc40cf2f36\") " pod="openshift-machine-config-operator/machine-config-server-nt2m7" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.132337 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bm8dl\" (UniqueName: \"kubernetes.io/projected/d752aecd-9a39-430c-9262-c81434b3f08a-kube-api-access-bm8dl\") pod \"kube-storage-version-migrator-operator-b67b599dd-xfnw9\" (UID: \"d752aecd-9a39-430c-9262-c81434b3f08a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xfnw9" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.132357 4875 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"kube-api-access-2dj2t\" (UniqueName: \"kubernetes.io/projected/b118a730-fb77-4632-83f3-9b94b4d52b2c-kube-api-access-2dj2t\") pod \"downloads-7954f5f757-7s69q\" (UID: \"b118a730-fb77-4632-83f3-9b94b4d52b2c\") " pod="openshift-console/downloads-7954f5f757-7s69q" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.132382 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/b226d68e-dbae-401a-88d4-ccc0cb1a51cf-default-certificate\") pod \"router-default-5444994796-bplfw\" (UID: \"b226d68e-dbae-401a-88d4-ccc0cb1a51cf\") " pod="openshift-ingress/router-default-5444994796-bplfw" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.132395 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"oauth-serving-cert\" (UniqueName: \"kubernetes.io/configmap/4349f16a-1b36-45b5-aa93-f982bcaa781f-oauth-serving-cert\") pod \"console-f9d7485db-l4gfb\" (UID: \"4349f16a-1b36-45b5-aa93-f982bcaa781f\") " pod="openshift-console/console-f9d7485db-l4gfb" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.132423 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-crbdp\" (UniqueName: \"kubernetes.io/projected/b226d68e-dbae-401a-88d4-ccc0cb1a51cf-kube-api-access-crbdp\") pod \"router-default-5444994796-bplfw\" (UID: \"b226d68e-dbae-401a-88d4-ccc0cb1a51cf\") " pod="openshift-ingress/router-default-5444994796-bplfw" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.132486 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9pgq7\" (UniqueName: \"kubernetes.io/projected/49bb8ae9-ab85-458a-850e-8d3b48ab42de-kube-api-access-9pgq7\") pod \"openshift-config-operator-7777fb866f-zww4t\" (UID: \"49bb8ae9-ab85-458a-850e-8d3b48ab42de\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-zww4t" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.132507 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8f3c50f0-6fff-4f1c-a0b6-f6756823da65-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-55rwl\" (UID: \"8f3c50f0-6fff-4f1c-a0b6-f6756823da65\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-55rwl" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.132533 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wfg4q\" (UniqueName: \"kubernetes.io/projected/1167d13e-be01-4849-a6e8-0b5f505a0caa-kube-api-access-wfg4q\") pod \"openshift-controller-manager-operator-756b6f6bc6-bbsj7\" (UID: \"1167d13e-be01-4849-a6e8-0b5f505a0caa\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bbsj7" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.132553 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/986802f7-9a9e-4191-8545-0ccda0c2bcf5-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-rln4j\" (UID: \"986802f7-9a9e-4191-8545-0ccda0c2bcf5\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-rln4j" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.132574 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b226d68e-dbae-401a-88d4-ccc0cb1a51cf-service-ca-bundle\") pod \"router-default-5444994796-bplfw\" (UID: \"b226d68e-dbae-401a-88d4-ccc0cb1a51cf\") " pod="openshift-ingress/router-default-5444994796-bplfw" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.132597 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/28e618ae-43ac-4fc4-83c2-28462346e2a9-auth-proxy-config\") pod \"machine-approver-56656f9798-7dk2l\" (UID: \"28e618ae-43ac-4fc4-83c2-28462346e2a9\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7dk2l" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.132621 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0c1975b8-52f8-4b58-a5f3-cc74156dcd75-apiservice-cert\") pod \"packageserver-d55dfcdfc-t2hxd\" (UID: \"0c1975b8-52f8-4b58-a5f3-cc74156dcd75\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t2hxd" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.132640 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1eeb4a6d-f2da-4006-aa3a-37f3adb4937b-config\") pod \"kube-controller-manager-operator-78b949d7b-gw6b8\" (UID: \"1eeb4a6d-f2da-4006-aa3a-37f3adb4937b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gw6b8" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.132662 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/2f69fc9c-de93-4cae-a491-50698ac10599-audit-dir\") pod \"apiserver-7bbb656c7d-l4vms\" (UID: \"2f69fc9c-de93-4cae-a491-50698ac10599\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-l4vms" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.132681 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/715ac3a3-bef6-4122-a4b0-3ea80be8a7eb-serving-cert\") pod \"etcd-operator-b45778765-9r68s\" (UID: \"715ac3a3-bef6-4122-a4b0-3ea80be8a7eb\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9r68s" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.132699 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/baf3ba36-cf4a-4780-b954-913da501aab7-bound-sa-token\") pod \"ingress-operator-5b745b69d9-gd8c4\" (UID: \"baf3ba36-cf4a-4780-b954-913da501aab7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-gd8c4" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.132717 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/baf3ba36-cf4a-4780-b954-913da501aab7-trusted-ca\") pod \"ingress-operator-5b745b69d9-gd8c4\" (UID: \"baf3ba36-cf4a-4780-b954-913da501aab7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-gd8c4" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.132735 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/b0ac0929-e6e0-464c-bb52-aadcea8d2f3e-proxy-tls\") pod \"machine-config-controller-84d6567774-mpgjt\" (UID: \"b0ac0929-e6e0-464c-bb52-aadcea8d2f3e\") " 
pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mpgjt" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.132758 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5e0f9c3c-fb3f-463b-a69c-57fc40cf2f36-node-bootstrap-token\") pod \"machine-config-server-nt2m7\" (UID: \"5e0f9c3c-fb3f-463b-a69c-57fc40cf2f36\") " pod="openshift-machine-config-operator/machine-config-server-nt2m7" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.132776 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/24b77217-7ebc-4165-a7b5-d7171adbc29a-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-8kdf7\" (UID: \"24b77217-7ebc-4165-a7b5-d7171adbc29a\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-8kdf7" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.132791 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/baf3ba36-cf4a-4780-b954-913da501aab7-metrics-tls\") pod \"ingress-operator-5b745b69d9-gd8c4\" (UID: \"baf3ba36-cf4a-4780-b954-913da501aab7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-gd8c4" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.132852 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1eeb4a6d-f2da-4006-aa3a-37f3adb4937b-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-gw6b8\" (UID: \"1eeb4a6d-f2da-4006-aa3a-37f3adb4937b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gw6b8" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.132874 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-59t7w\" (UniqueName: \"kubernetes.io/projected/715ac3a3-bef6-4122-a4b0-3ea80be8a7eb-kube-api-access-59t7w\") pod \"etcd-operator-b45778765-9r68s\" (UID: \"715ac3a3-bef6-4122-a4b0-3ea80be8a7eb\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9r68s" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.132892 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/2f69fc9c-de93-4cae-a491-50698ac10599-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-l4vms\" (UID: \"2f69fc9c-de93-4cae-a491-50698ac10599\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-l4vms" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.132907 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/2f69fc9c-de93-4cae-a491-50698ac10599-encryption-config\") pod \"apiserver-7bbb656c7d-l4vms\" (UID: \"2f69fc9c-de93-4cae-a491-50698ac10599\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-l4vms" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.132922 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4xwpm\" (UniqueName: \"kubernetes.io/projected/baf3ba36-cf4a-4780-b954-913da501aab7-kube-api-access-4xwpm\") pod \"ingress-operator-5b745b69d9-gd8c4\" (UID: \"baf3ba36-cf4a-4780-b954-913da501aab7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-gd8c4" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.132940 4875 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0c1975b8-52f8-4b58-a5f3-cc74156dcd75-webhook-cert\") pod \"packageserver-d55dfcdfc-t2hxd\" (UID: \"0c1975b8-52f8-4b58-a5f3-cc74156dcd75\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t2hxd" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.132960 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-v825q\" (UniqueName: \"kubernetes.io/projected/1da5021c-2176-41cb-9c3c-d4ccbe299664-kube-api-access-v825q\") pod \"migrator-59844c95c7-qxqtr\" (UID: \"1da5021c-2176-41cb-9c3c-d4ccbe299664\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-qxqtr" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.132974 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5e0f9c3c-fb3f-463b-a69c-57fc40cf2f36-certs\") pod \"machine-config-server-nt2m7\" (UID: \"5e0f9c3c-fb3f-463b-a69c-57fc40cf2f36\") " pod="openshift-machine-config-operator/machine-config-server-nt2m7" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.132990 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0dc6d7a2-494d-462d-8972-de55d1e36f31-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-54wbh\" (UID: \"0dc6d7a2-494d-462d-8972-de55d1e36f31\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-54wbh" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.133017 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8f3c50f0-6fff-4f1c-a0b6-f6756823da65-serving-cert\") pod \"authentication-operator-69f744f599-55rwl\" (UID: \"8f3c50f0-6fff-4f1c-a0b6-f6756823da65\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-55rwl" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.133033 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/24b77217-7ebc-4165-a7b5-d7171adbc29a-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-8kdf7\" (UID: \"24b77217-7ebc-4165-a7b5-d7171adbc29a\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-8kdf7" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.133074 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/715ac3a3-bef6-4122-a4b0-3ea80be8a7eb-config\") pod \"etcd-operator-b45778765-9r68s\" (UID: \"715ac3a3-bef6-4122-a4b0-3ea80be8a7eb\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9r68s" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.133097 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2f69fc9c-de93-4cae-a491-50698ac10599-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-l4vms\" (UID: \"2f69fc9c-de93-4cae-a491-50698ac10599\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-l4vms" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.133117 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1167d13e-be01-4849-a6e8-0b5f505a0caa-config\") pod 
\"openshift-controller-manager-operator-756b6f6bc6-bbsj7\" (UID: \"1167d13e-be01-4849-a6e8-0b5f505a0caa\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bbsj7" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.133129 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8f3c50f0-6fff-4f1c-a0b6-f6756823da65-service-ca-bundle\") pod \"authentication-operator-69f744f599-55rwl\" (UID: \"8f3c50f0-6fff-4f1c-a0b6-f6756823da65\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-55rwl" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.133138 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4349f16a-1b36-45b5-aa93-f982bcaa781f-trusted-ca-bundle\") pod \"console-f9d7485db-l4gfb\" (UID: \"4349f16a-1b36-45b5-aa93-f982bcaa781f\") " pod="openshift-console/console-f9d7485db-l4gfb" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.133154 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f415abb0-de0a-477d-96c0-31efea9964e2-serving-cert\") pod \"openshift-apiserver-operator-796bbdcf4f-mgkmk\" (UID: \"f415abb0-de0a-477d-96c0-31efea9964e2\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-mgkmk" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.133171 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9djpq\" (UniqueName: \"kubernetes.io/projected/b0ac0929-e6e0-464c-bb52-aadcea8d2f3e-kube-api-access-9djpq\") pod \"machine-config-controller-84d6567774-mpgjt\" (UID: \"b0ac0929-e6e0-464c-bb52-aadcea8d2f3e\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mpgjt" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.133187 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/0dc6d7a2-494d-462d-8972-de55d1e36f31-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-54wbh\" (UID: \"0dc6d7a2-494d-462d-8972-de55d1e36f31\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-54wbh" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.133206 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5cnhn\" (UniqueName: \"kubernetes.io/projected/986802f7-9a9e-4191-8545-0ccda0c2bcf5-kube-api-access-5cnhn\") pod \"control-plane-machine-set-operator-78cbb6b69f-rln4j\" (UID: \"986802f7-9a9e-4191-8545-0ccda0c2bcf5\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-rln4j" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.133223 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/49bb8ae9-ab85-458a-850e-8d3b48ab42de-serving-cert\") pod \"openshift-config-operator-7777fb866f-zww4t\" (UID: \"49bb8ae9-ab85-458a-850e-8d3b48ab42de\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-zww4t" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.133240 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/28e618ae-43ac-4fc4-83c2-28462346e2a9-machine-approver-tls\") pod 
\"machine-approver-56656f9798-7dk2l\" (UID: \"28e618ae-43ac-4fc4-83c2-28462346e2a9\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7dk2l" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.133259 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/b0ac0929-e6e0-464c-bb52-aadcea8d2f3e-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-mpgjt\" (UID: \"b0ac0929-e6e0-464c-bb52-aadcea8d2f3e\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mpgjt" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.133280 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/4349f16a-1b36-45b5-aa93-f982bcaa781f-console-config\") pod \"console-f9d7485db-l4gfb\" (UID: \"4349f16a-1b36-45b5-aa93-f982bcaa781f\") " pod="openshift-console/console-f9d7485db-l4gfb" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.133296 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/0c1975b8-52f8-4b58-a5f3-cc74156dcd75-tmpfs\") pod \"packageserver-d55dfcdfc-t2hxd\" (UID: \"0c1975b8-52f8-4b58-a5f3-cc74156dcd75\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t2hxd" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.133314 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/49bb8ae9-ab85-458a-850e-8d3b48ab42de-available-featuregates\") pod \"openshift-config-operator-7777fb866f-zww4t\" (UID: \"49bb8ae9-ab85-458a-850e-8d3b48ab42de\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-zww4t" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.133351 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f3c50f0-6fff-4f1c-a0b6-f6756823da65-config\") pod \"authentication-operator-69f744f599-55rwl\" (UID: \"8f3c50f0-6fff-4f1c-a0b6-f6756823da65\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-55rwl" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.133367 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xfdxj\" (UniqueName: \"kubernetes.io/projected/2f69fc9c-de93-4cae-a491-50698ac10599-kube-api-access-xfdxj\") pod \"apiserver-7bbb656c7d-l4vms\" (UID: \"2f69fc9c-de93-4cae-a491-50698ac10599\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-l4vms" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.133382 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1eeb4a6d-f2da-4006-aa3a-37f3adb4937b-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-gw6b8\" (UID: \"1eeb4a6d-f2da-4006-aa3a-37f3adb4937b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gw6b8" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.133399 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/83b8aa62-fa25-4831-8fba-c08bb0887d14-signing-cabundle\") pod \"service-ca-9c57cc56f-pq492\" (UID: \"83b8aa62-fa25-4831-8fba-c08bb0887d14\") " pod="openshift-service-ca/service-ca-9c57cc56f-pq492" Nov 26 
00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.133432 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/d04a012b-fcc1-4f75-8e0a-4a4a17d9cf75-profile-collector-cert\") pod \"catalog-operator-68c6474976-62q8r\" (UID: \"d04a012b-fcc1-4f75-8e0a-4a4a17d9cf75\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-62q8r" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.137744 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/b226d68e-dbae-401a-88d4-ccc0cb1a51cf-service-ca-bundle\") pod \"router-default-5444994796-bplfw\" (UID: \"b226d68e-dbae-401a-88d4-ccc0cb1a51cf\") " pod="openshift-ingress/router-default-5444994796-bplfw" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.138317 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/28e618ae-43ac-4fc4-83c2-28462346e2a9-auth-proxy-config\") pod \"machine-approver-56656f9798-7dk2l\" (UID: \"28e618ae-43ac-4fc4-83c2-28462346e2a9\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7dk2l" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.139037 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/b226d68e-dbae-401a-88d4-ccc0cb1a51cf-metrics-certs\") pod \"router-default-5444994796-bplfw\" (UID: \"b226d68e-dbae-401a-88d4-ccc0cb1a51cf\") " pod="openshift-ingress/router-default-5444994796-bplfw" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.139082 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1eeb4a6d-f2da-4006-aa3a-37f3adb4937b-config\") pod \"kube-controller-manager-operator-78b949d7b-gw6b8\" (UID: \"1eeb4a6d-f2da-4006-aa3a-37f3adb4937b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gw6b8" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.139161 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/2f69fc9c-de93-4cae-a491-50698ac10599-audit-dir\") pod \"apiserver-7bbb656c7d-l4vms\" (UID: \"2f69fc9c-de93-4cae-a491-50698ac10599\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-l4vms" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.140137 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-serving-ca\" (UniqueName: \"kubernetes.io/configmap/2f69fc9c-de93-4cae-a491-50698ac10599-etcd-serving-ca\") pod \"apiserver-7bbb656c7d-l4vms\" (UID: \"2f69fc9c-de93-4cae-a491-50698ac10599\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-l4vms" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.140561 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/2f69fc9c-de93-4cae-a491-50698ac10599-etcd-client\") pod \"apiserver-7bbb656c7d-l4vms\" (UID: \"2f69fc9c-de93-4cae-a491-50698ac10599\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-l4vms" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.141129 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/28e618ae-43ac-4fc4-83c2-28462346e2a9-config\") pod \"machine-approver-56656f9798-7dk2l\" (UID: 
\"28e618ae-43ac-4fc4-83c2-28462346e2a9\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7dk2l" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.143712 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2f69fc9c-de93-4cae-a491-50698ac10599-serving-cert\") pod \"apiserver-7bbb656c7d-l4vms\" (UID: \"2f69fc9c-de93-4cae-a491-50698ac10599\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-l4vms" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.144299 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/85ee0261-b699-4159-9ee7-123628b1f1c0-auth-proxy-config\") pod \"machine-config-operator-74547568cd-bsvcq\" (UID: \"85ee0261-b699-4159-9ee7-123628b1f1c0\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-bsvcq" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.144786 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"encryption-config\" (UniqueName: \"kubernetes.io/secret/2f69fc9c-de93-4cae-a491-50698ac10599-encryption-config\") pod \"apiserver-7bbb656c7d-l4vms\" (UID: \"2f69fc9c-de93-4cae-a491-50698ac10599\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-l4vms" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.144940 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"available-featuregates\" (UniqueName: \"kubernetes.io/empty-dir/49bb8ae9-ab85-458a-850e-8d3b48ab42de-available-featuregates\") pod \"openshift-config-operator-7777fb866f-zww4t\" (UID: \"49bb8ae9-ab85-458a-850e-8d3b48ab42de\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-zww4t" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.145987 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-serving-cert\" (UniqueName: \"kubernetes.io/secret/4349f16a-1b36-45b5-aa93-f982bcaa781f-console-serving-cert\") pod \"console-f9d7485db-l4gfb\" (UID: \"4349f16a-1b36-45b5-aa93-f982bcaa781f\") " pod="openshift-console/console-f9d7485db-l4gfb" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.146006 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/0dc6d7a2-494d-462d-8972-de55d1e36f31-trusted-ca\") pod \"cluster-image-registry-operator-dc59b4c8b-54wbh\" (UID: \"0dc6d7a2-494d-462d-8972-de55d1e36f31\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-54wbh" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.146206 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/2f69fc9c-de93-4cae-a491-50698ac10599-trusted-ca-bundle\") pod \"apiserver-7bbb656c7d-l4vms\" (UID: \"2f69fc9c-de93-4cae-a491-50698ac10599\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-l4vms" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.146573 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-oauth-config\" (UniqueName: \"kubernetes.io/secret/4349f16a-1b36-45b5-aa93-f982bcaa781f-console-oauth-config\") pod \"console-f9d7485db-l4gfb\" (UID: \"4349f16a-1b36-45b5-aa93-f982bcaa781f\") " pod="openshift-console/console-f9d7485db-l4gfb" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.146920 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: 
\"kubernetes.io/configmap/f415abb0-de0a-477d-96c0-31efea9964e2-config\") pod \"openshift-apiserver-operator-796bbdcf4f-mgkmk\" (UID: \"f415abb0-de0a-477d-96c0-31efea9964e2\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-mgkmk" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.147368 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"console-config\" (UniqueName: \"kubernetes.io/configmap/4349f16a-1b36-45b5-aa93-f982bcaa781f-console-config\") pod \"console-f9d7485db-l4gfb\" (UID: \"4349f16a-1b36-45b5-aa93-f982bcaa781f\") " pod="openshift-console/console-f9d7485db-l4gfb" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.147587 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmpfs\" (UniqueName: \"kubernetes.io/empty-dir/0c1975b8-52f8-4b58-a5f3-cc74156dcd75-tmpfs\") pod \"packageserver-d55dfcdfc-t2hxd\" (UID: \"0c1975b8-52f8-4b58-a5f3-cc74156dcd75\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t2hxd" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.147744 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mcc-auth-proxy-config\" (UniqueName: \"kubernetes.io/configmap/b0ac0929-e6e0-464c-bb52-aadcea8d2f3e-mcc-auth-proxy-config\") pod \"machine-config-controller-84d6567774-mpgjt\" (UID: \"b0ac0929-e6e0-464c-bb52-aadcea8d2f3e\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mpgjt" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.147901 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"stats-auth\" (UniqueName: \"kubernetes.io/secret/b226d68e-dbae-401a-88d4-ccc0cb1a51cf-stats-auth\") pod \"router-default-5444994796-bplfw\" (UID: \"b226d68e-dbae-401a-88d4-ccc0cb1a51cf\") " pod="openshift-ingress/router-default-5444994796-bplfw" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.148239 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/2f69fc9c-de93-4cae-a491-50698ac10599-audit-policies\") pod \"apiserver-7bbb656c7d-l4vms\" (UID: \"2f69fc9c-de93-4cae-a491-50698ac10599\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-l4vms" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.147823 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/8f3c50f0-6fff-4f1c-a0b6-f6756823da65-config\") pod \"authentication-operator-69f744f599-55rwl\" (UID: \"8f3c50f0-6fff-4f1c-a0b6-f6756823da65\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-55rwl" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.148453 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.149066 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/8f3c50f0-6fff-4f1c-a0b6-f6756823da65-trusted-ca-bundle\") pod \"authentication-operator-69f744f599-55rwl\" (UID: \"8f3c50f0-6fff-4f1c-a0b6-f6756823da65\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-55rwl" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.149200 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/f415abb0-de0a-477d-96c0-31efea9964e2-serving-cert\") pod 
\"openshift-apiserver-operator-796bbdcf4f-mgkmk\" (UID: \"f415abb0-de0a-477d-96c0-31efea9964e2\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-mgkmk" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.149689 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/8f3c50f0-6fff-4f1c-a0b6-f6756823da65-serving-cert\") pod \"authentication-operator-69f744f599-55rwl\" (UID: \"8f3c50f0-6fff-4f1c-a0b6-f6756823da65\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-55rwl" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.150012 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"default-certificate\" (UniqueName: \"kubernetes.io/secret/b226d68e-dbae-401a-88d4-ccc0cb1a51cf-default-certificate\") pod \"router-default-5444994796-bplfw\" (UID: \"b226d68e-dbae-401a-88d4-ccc0cb1a51cf\") " pod="openshift-ingress/router-default-5444994796-bplfw" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.150769 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"image-registry-operator-tls\" (UniqueName: \"kubernetes.io/secret/0dc6d7a2-494d-462d-8972-de55d1e36f31-image-registry-operator-tls\") pod \"cluster-image-registry-operator-dc59b4c8b-54wbh\" (UID: \"0dc6d7a2-494d-462d-8972-de55d1e36f31\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-54wbh" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.151601 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"machine-approver-tls\" (UniqueName: \"kubernetes.io/secret/28e618ae-43ac-4fc4-83c2-28462346e2a9-machine-approver-tls\") pod \"machine-approver-56656f9798-7dk2l\" (UID: \"28e618ae-43ac-4fc4-83c2-28462346e2a9\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7dk2l" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.151769 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1eeb4a6d-f2da-4006-aa3a-37f3adb4937b-serving-cert\") pod \"kube-controller-manager-operator-78b949d7b-gw6b8\" (UID: \"1eeb4a6d-f2da-4006-aa3a-37f3adb4937b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gw6b8" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.153651 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/d752aecd-9a39-430c-9262-c81434b3f08a-config\") pod \"kube-storage-version-migrator-operator-b67b599dd-xfnw9\" (UID: \"d752aecd-9a39-430c-9262-c81434b3f08a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xfnw9" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.153963 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/49bb8ae9-ab85-458a-850e-8d3b48ab42de-serving-cert\") pod \"openshift-config-operator-7777fb866f-zww4t\" (UID: \"49bb8ae9-ab85-458a-850e-8d3b48ab42de\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-zww4t" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.168861 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.187498 4875 reflector.go:368] Caches populated for 
*v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.190732 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/d752aecd-9a39-430c-9262-c81434b3f08a-serving-cert\") pod \"kube-storage-version-migrator-operator-b67b599dd-xfnw9\" (UID: \"d752aecd-9a39-430c-9262-c81434b3f08a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xfnw9" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.208122 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.227602 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.256084 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.267037 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/4349f16a-1b36-45b5-aa93-f982bcaa781f-trusted-ca-bundle\") pod \"console-f9d7485db-l4gfb\" (UID: \"4349f16a-1b36-45b5-aa93-f982bcaa781f\") " pod="openshift-console/console-f9d7485db-l4gfb" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.267768 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.287629 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.307381 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.327389 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.335396 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"service-ca\" (UniqueName: \"kubernetes.io/configmap/4349f16a-1b36-45b5-aa93-f982bcaa781f-service-ca\") pod \"console-f9d7485db-l4gfb\" (UID: \"4349f16a-1b36-45b5-aa93-f982bcaa781f\") " pod="openshift-console/console-f9d7485db-l4gfb" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.367425 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.387523 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.407424 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.427983 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.439021 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" 
(UniqueName: \"kubernetes.io/secret/d04a012b-fcc1-4f75-8e0a-4a4a17d9cf75-profile-collector-cert\") pod \"catalog-operator-68c6474976-62q8r\" (UID: \"d04a012b-fcc1-4f75-8e0a-4a4a17d9cf75\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-62q8r" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.447726 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.467196 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.488178 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.507991 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.513628 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/1167d13e-be01-4849-a6e8-0b5f505a0caa-serving-cert\") pod \"openshift-controller-manager-operator-756b6f6bc6-bbsj7\" (UID: \"1167d13e-be01-4849-a6e8-0b5f505a0caa\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bbsj7" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.528526 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.537138 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/1167d13e-be01-4849-a6e8-0b5f505a0caa-config\") pod \"openshift-controller-manager-operator-756b6f6bc6-bbsj7\" (UID: \"1167d13e-be01-4849-a6e8-0b5f505a0caa\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bbsj7" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.547560 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.567450 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.588155 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.597123 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/d04a012b-fcc1-4f75-8e0a-4a4a17d9cf75-srv-cert\") pod \"catalog-operator-68c6474976-62q8r\" (UID: \"d04a012b-fcc1-4f75-8e0a-4a4a17d9cf75\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-62q8r" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.608228 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.628673 4875 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-ingress-operator"/"metrics-tls" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.632376 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/baf3ba36-cf4a-4780-b954-913da501aab7-metrics-tls\") pod \"ingress-operator-5b745b69d9-gd8c4\" (UID: \"baf3ba36-cf4a-4780-b954-913da501aab7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-gd8c4" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.653527 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.660874 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/baf3ba36-cf4a-4780-b954-913da501aab7-trusted-ca\") pod \"ingress-operator-5b745b69d9-gd8c4\" (UID: \"baf3ba36-cf4a-4780-b954-913da501aab7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-gd8c4" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.667980 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.687922 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.707190 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.726894 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.749512 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.768172 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.787212 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.808148 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.813627 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/b0ac0929-e6e0-464c-bb52-aadcea8d2f3e-proxy-tls\") pod \"machine-config-controller-84d6567774-mpgjt\" (UID: \"b0ac0929-e6e0-464c-bb52-aadcea8d2f3e\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mpgjt" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.828238 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.847849 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.863693 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/24b77217-7ebc-4165-a7b5-d7171adbc29a-serving-cert\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-8kdf7\" (UID: \"24b77217-7ebc-4165-a7b5-d7171adbc29a\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-8kdf7" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.868061 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.887848 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.907725 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.910884 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/24b77217-7ebc-4165-a7b5-d7171adbc29a-config\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-8kdf7\" (UID: \"24b77217-7ebc-4165-a7b5-d7171adbc29a\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-8kdf7" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.927981 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.947866 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.951744 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/0c1975b8-52f8-4b58-a5f3-cc74156dcd75-apiservice-cert\") pod \"packageserver-d55dfcdfc-t2hxd\" (UID: \"0c1975b8-52f8-4b58-a5f3-cc74156dcd75\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t2hxd" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.958055 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/0c1975b8-52f8-4b58-a5f3-cc74156dcd75-webhook-cert\") pod \"packageserver-d55dfcdfc-t2hxd\" (UID: \"0c1975b8-52f8-4b58-a5f3-cc74156dcd75\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t2hxd" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.968090 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.986827 4875 request.go:700] Waited for 1.020006324s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/secrets?fieldSelector=metadata.name%3Dmarketplace-operator-metrics&limit=500&resourceVersion=0 Nov 26 00:09:27 crc kubenswrapper[4875]: I1126 00:09:27.988463 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.008086 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.027870 4875 reflector.go:368] Caches populated 
for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.032985 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-bootstrap-token\" (UniqueName: \"kubernetes.io/secret/5e0f9c3c-fb3f-463b-a69c-57fc40cf2f36-node-bootstrap-token\") pod \"machine-config-server-nt2m7\" (UID: \"5e0f9c3c-fb3f-463b-a69c-57fc40cf2f36\") " pod="openshift-machine-config-operator/machine-config-server-nt2m7" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.048171 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.068077 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.079960 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"certs\" (UniqueName: \"kubernetes.io/secret/5e0f9c3c-fb3f-463b-a69c-57fc40cf2f36-certs\") pod \"machine-config-server-nt2m7\" (UID: \"5e0f9c3c-fb3f-463b-a69c-57fc40cf2f36\") " pod="openshift-machine-config-operator/machine-config-server-nt2m7" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.093296 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.107374 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.127636 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 26 00:09:28 crc kubenswrapper[4875]: E1126 00:09:28.131805 4875 secret.go:188] Couldn't get secret openshift-etcd-operator/etcd-client: failed to sync secret cache: timed out waiting for the condition Nov 26 00:09:28 crc kubenswrapper[4875]: E1126 00:09:28.131846 4875 configmap.go:193] Couldn't get configMap openshift-etcd-operator/etcd-ca-bundle: failed to sync configmap cache: timed out waiting for the condition Nov 26 00:09:28 crc kubenswrapper[4875]: E1126 00:09:28.131859 4875 secret.go:188] Couldn't get secret openshift-operator-lifecycle-manager/package-server-manager-serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 26 00:09:28 crc kubenswrapper[4875]: E1126 00:09:28.131900 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/715ac3a3-bef6-4122-a4b0-3ea80be8a7eb-etcd-client podName:715ac3a3-bef6-4122-a4b0-3ea80be8a7eb nodeName:}" failed. No retries permitted until 2025-11-26 00:09:28.631874707 +0000 UTC m=+141.821850402 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etcd-client" (UniqueName: "kubernetes.io/secret/715ac3a3-bef6-4122-a4b0-3ea80be8a7eb-etcd-client") pod "etcd-operator-b45778765-9r68s" (UID: "715ac3a3-bef6-4122-a4b0-3ea80be8a7eb") : failed to sync secret cache: timed out waiting for the condition Nov 26 00:09:28 crc kubenswrapper[4875]: E1126 00:09:28.131924 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/715ac3a3-bef6-4122-a4b0-3ea80be8a7eb-etcd-ca podName:715ac3a3-bef6-4122-a4b0-3ea80be8a7eb nodeName:}" failed. No retries permitted until 2025-11-26 00:09:28.631913348 +0000 UTC m=+141.821889163 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "etcd-ca" (UniqueName: "kubernetes.io/configmap/715ac3a3-bef6-4122-a4b0-3ea80be8a7eb-etcd-ca") pod "etcd-operator-b45778765-9r68s" (UID: "715ac3a3-bef6-4122-a4b0-3ea80be8a7eb") : failed to sync configmap cache: timed out waiting for the condition Nov 26 00:09:28 crc kubenswrapper[4875]: E1126 00:09:28.131940 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/c05d440a-597d-4b70-9408-2b3143be393d-package-server-manager-serving-cert podName:c05d440a-597d-4b70-9408-2b3143be393d nodeName:}" failed. No retries permitted until 2025-11-26 00:09:28.631932588 +0000 UTC m=+141.821908283 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "package-server-manager-serving-cert" (UniqueName: "kubernetes.io/secret/c05d440a-597d-4b70-9408-2b3143be393d-package-server-manager-serving-cert") pod "package-server-manager-789f6589d5-fgpzs" (UID: "c05d440a-597d-4b70-9408-2b3143be393d") : failed to sync secret cache: timed out waiting for the condition Nov 26 00:09:28 crc kubenswrapper[4875]: E1126 00:09:28.134268 4875 configmap.go:193] Couldn't get configMap openshift-etcd-operator/etcd-service-ca-bundle: failed to sync configmap cache: timed out waiting for the condition Nov 26 00:09:28 crc kubenswrapper[4875]: E1126 00:09:28.134297 4875 configmap.go:193] Couldn't get configMap openshift-machine-config-operator/machine-config-operator-images: failed to sync configmap cache: timed out waiting for the condition Nov 26 00:09:28 crc kubenswrapper[4875]: E1126 00:09:28.134323 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/715ac3a3-bef6-4122-a4b0-3ea80be8a7eb-etcd-service-ca podName:715ac3a3-bef6-4122-a4b0-3ea80be8a7eb nodeName:}" failed. No retries permitted until 2025-11-26 00:09:28.634306199 +0000 UTC m=+141.824281894 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "etcd-service-ca" (UniqueName: "kubernetes.io/configmap/715ac3a3-bef6-4122-a4b0-3ea80be8a7eb-etcd-service-ca") pod "etcd-operator-b45778765-9r68s" (UID: "715ac3a3-bef6-4122-a4b0-3ea80be8a7eb") : failed to sync configmap cache: timed out waiting for the condition Nov 26 00:09:28 crc kubenswrapper[4875]: E1126 00:09:28.134336 4875 secret.go:188] Couldn't get secret openshift-machine-config-operator/mco-proxy-tls: failed to sync secret cache: timed out waiting for the condition Nov 26 00:09:28 crc kubenswrapper[4875]: E1126 00:09:28.134355 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/85ee0261-b699-4159-9ee7-123628b1f1c0-images podName:85ee0261-b699-4159-9ee7-123628b1f1c0 nodeName:}" failed. No retries permitted until 2025-11-26 00:09:28.634336709 +0000 UTC m=+141.824312464 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "images" (UniqueName: "kubernetes.io/configmap/85ee0261-b699-4159-9ee7-123628b1f1c0-images") pod "machine-config-operator-74547568cd-bsvcq" (UID: "85ee0261-b699-4159-9ee7-123628b1f1c0") : failed to sync configmap cache: timed out waiting for the condition Nov 26 00:09:28 crc kubenswrapper[4875]: E1126 00:09:28.134381 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/85ee0261-b699-4159-9ee7-123628b1f1c0-proxy-tls podName:85ee0261-b699-4159-9ee7-123628b1f1c0 nodeName:}" failed. No retries permitted until 2025-11-26 00:09:28.63437299 +0000 UTC m=+141.824348755 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "proxy-tls" (UniqueName: "kubernetes.io/secret/85ee0261-b699-4159-9ee7-123628b1f1c0-proxy-tls") pod "machine-config-operator-74547568cd-bsvcq" (UID: "85ee0261-b699-4159-9ee7-123628b1f1c0") : failed to sync secret cache: timed out waiting for the condition Nov 26 00:09:28 crc kubenswrapper[4875]: E1126 00:09:28.137111 4875 secret.go:188] Couldn't get secret openshift-machine-api/control-plane-machine-set-operator-tls: failed to sync secret cache: timed out waiting for the condition Nov 26 00:09:28 crc kubenswrapper[4875]: E1126 00:09:28.137181 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/986802f7-9a9e-4191-8545-0ccda0c2bcf5-control-plane-machine-set-operator-tls podName:986802f7-9a9e-4191-8545-0ccda0c2bcf5 nodeName:}" failed. No retries permitted until 2025-11-26 00:09:28.637152612 +0000 UTC m=+141.827128307 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "control-plane-machine-set-operator-tls" (UniqueName: "kubernetes.io/secret/986802f7-9a9e-4191-8545-0ccda0c2bcf5-control-plane-machine-set-operator-tls") pod "control-plane-machine-set-operator-78cbb6b69f-rln4j" (UID: "986802f7-9a9e-4191-8545-0ccda0c2bcf5") : failed to sync secret cache: timed out waiting for the condition Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.137845 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-key\" (UniqueName: \"kubernetes.io/secret/83b8aa62-fa25-4831-8fba-c08bb0887d14-signing-key\") pod \"service-ca-9c57cc56f-pq492\" (UID: \"83b8aa62-fa25-4831-8fba-c08bb0887d14\") " pod="openshift-service-ca/service-ca-9c57cc56f-pq492" Nov 26 00:09:28 crc kubenswrapper[4875]: E1126 00:09:28.140048 4875 secret.go:188] Couldn't get secret openshift-etcd-operator/etcd-operator-serving-cert: failed to sync secret cache: timed out waiting for the condition Nov 26 00:09:28 crc kubenswrapper[4875]: E1126 00:09:28.140197 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/secret/715ac3a3-bef6-4122-a4b0-3ea80be8a7eb-serving-cert podName:715ac3a3-bef6-4122-a4b0-3ea80be8a7eb nodeName:}" failed. No retries permitted until 2025-11-26 00:09:28.640145049 +0000 UTC m=+141.830120814 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "serving-cert" (UniqueName: "kubernetes.io/secret/715ac3a3-bef6-4122-a4b0-3ea80be8a7eb-serving-cert") pod "etcd-operator-b45778765-9r68s" (UID: "715ac3a3-bef6-4122-a4b0-3ea80be8a7eb") : failed to sync secret cache: timed out waiting for the condition Nov 26 00:09:28 crc kubenswrapper[4875]: E1126 00:09:28.145452 4875 configmap.go:193] Couldn't get configMap openshift-etcd-operator/etcd-operator-config: failed to sync configmap cache: timed out waiting for the condition Nov 26 00:09:28 crc kubenswrapper[4875]: E1126 00:09:28.145529 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/715ac3a3-bef6-4122-a4b0-3ea80be8a7eb-config podName:715ac3a3-bef6-4122-a4b0-3ea80be8a7eb nodeName:}" failed. No retries permitted until 2025-11-26 00:09:28.645507177 +0000 UTC m=+141.835482962 (durationBeforeRetry 500ms). 
Error: MountVolume.SetUp failed for volume "config" (UniqueName: "kubernetes.io/configmap/715ac3a3-bef6-4122-a4b0-3ea80be8a7eb-config") pod "etcd-operator-b45778765-9r68s" (UID: "715ac3a3-bef6-4122-a4b0-3ea80be8a7eb") : failed to sync configmap cache: timed out waiting for the condition Nov 26 00:09:28 crc kubenswrapper[4875]: E1126 00:09:28.147495 4875 configmap.go:193] Couldn't get configMap openshift-service-ca/signing-cabundle: failed to sync configmap cache: timed out waiting for the condition Nov 26 00:09:28 crc kubenswrapper[4875]: E1126 00:09:28.147562 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/configmap/83b8aa62-fa25-4831-8fba-c08bb0887d14-signing-cabundle podName:83b8aa62-fa25-4831-8fba-c08bb0887d14 nodeName:}" failed. No retries permitted until 2025-11-26 00:09:28.647545049 +0000 UTC m=+141.837520744 (durationBeforeRetry 500ms). Error: MountVolume.SetUp failed for volume "signing-cabundle" (UniqueName: "kubernetes.io/configmap/83b8aa62-fa25-4831-8fba-c08bb0887d14-signing-cabundle") pod "service-ca-9c57cc56f-pq492" (UID: "83b8aa62-fa25-4831-8fba-c08bb0887d14") : failed to sync configmap cache: timed out waiting for the condition Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.147725 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.167475 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.188265 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.208717 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.227381 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.248370 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.268137 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.288384 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.307550 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.327712 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.348055 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.368130 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.388280 4875 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.407693 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.428029 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.448429 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.488485 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.508194 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.528211 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.547908 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.568648 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.588441 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.608760 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.628254 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.654285 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"images\" (UniqueName: \"kubernetes.io/configmap/85ee0261-b699-4159-9ee7-123628b1f1c0-images\") pod \"machine-config-operator-74547568cd-bsvcq\" (UID: \"85ee0261-b699-4159-9ee7-123628b1f1c0\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-bsvcq" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.654349 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/85ee0261-b699-4159-9ee7-123628b1f1c0-proxy-tls\") pod \"machine-config-operator-74547568cd-bsvcq\" (UID: \"85ee0261-b699-4159-9ee7-123628b1f1c0\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-bsvcq" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.654377 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/715ac3a3-bef6-4122-a4b0-3ea80be8a7eb-etcd-service-ca\") pod \"etcd-operator-b45778765-9r68s\" (UID: \"715ac3a3-bef6-4122-a4b0-3ea80be8a7eb\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9r68s" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.654597 4875 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/986802f7-9a9e-4191-8545-0ccda0c2bcf5-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-rln4j\" (UID: \"986802f7-9a9e-4191-8545-0ccda0c2bcf5\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-rln4j" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.654634 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/715ac3a3-bef6-4122-a4b0-3ea80be8a7eb-serving-cert\") pod \"etcd-operator-b45778765-9r68s\" (UID: \"715ac3a3-bef6-4122-a4b0-3ea80be8a7eb\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9r68s" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.655618 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"images\" (UniqueName: \"kubernetes.io/configmap/85ee0261-b699-4159-9ee7-123628b1f1c0-images\") pod \"machine-config-operator-74547568cd-bsvcq\" (UID: \"85ee0261-b699-4159-9ee7-123628b1f1c0\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-bsvcq" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.655872 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/715ac3a3-bef6-4122-a4b0-3ea80be8a7eb-config\") pod \"etcd-operator-b45778765-9r68s\" (UID: \"715ac3a3-bef6-4122-a4b0-3ea80be8a7eb\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9r68s" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.656068 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/83b8aa62-fa25-4831-8fba-c08bb0887d14-signing-cabundle\") pod \"service-ca-9c57cc56f-pq492\" (UID: \"83b8aa62-fa25-4831-8fba-c08bb0887d14\") " pod="openshift-service-ca/service-ca-9c57cc56f-pq492" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.656149 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/715ac3a3-bef6-4122-a4b0-3ea80be8a7eb-etcd-ca\") pod \"etcd-operator-b45778765-9r68s\" (UID: \"715ac3a3-bef6-4122-a4b0-3ea80be8a7eb\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9r68s" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.656208 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/715ac3a3-bef6-4122-a4b0-3ea80be8a7eb-etcd-client\") pod \"etcd-operator-b45778765-9r68s\" (UID: \"715ac3a3-bef6-4122-a4b0-3ea80be8a7eb\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9r68s" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.656228 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/c05d440a-597d-4b70-9408-2b3143be393d-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-fgpzs\" (UID: \"c05d440a-597d-4b70-9408-2b3143be393d\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fgpzs" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.656581 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/715ac3a3-bef6-4122-a4b0-3ea80be8a7eb-config\") pod 
\"etcd-operator-b45778765-9r68s\" (UID: \"715ac3a3-bef6-4122-a4b0-3ea80be8a7eb\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9r68s" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.656920 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-service-ca\" (UniqueName: \"kubernetes.io/configmap/715ac3a3-bef6-4122-a4b0-3ea80be8a7eb-etcd-service-ca\") pod \"etcd-operator-b45778765-9r68s\" (UID: \"715ac3a3-bef6-4122-a4b0-3ea80be8a7eb\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9r68s" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.657064 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-ca\" (UniqueName: \"kubernetes.io/configmap/715ac3a3-bef6-4122-a4b0-3ea80be8a7eb-etcd-ca\") pod \"etcd-operator-b45778765-9r68s\" (UID: \"715ac3a3-bef6-4122-a4b0-3ea80be8a7eb\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9r68s" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.657263 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"signing-cabundle\" (UniqueName: \"kubernetes.io/configmap/83b8aa62-fa25-4831-8fba-c08bb0887d14-signing-cabundle\") pod \"service-ca-9c57cc56f-pq492\" (UID: \"83b8aa62-fa25-4831-8fba-c08bb0887d14\") " pod="openshift-service-ca/service-ca-9c57cc56f-pq492" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.659112 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-tls\" (UniqueName: \"kubernetes.io/secret/85ee0261-b699-4159-9ee7-123628b1f1c0-proxy-tls\") pod \"machine-config-operator-74547568cd-bsvcq\" (UID: \"85ee0261-b699-4159-9ee7-123628b1f1c0\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-bsvcq" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.660069 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/715ac3a3-bef6-4122-a4b0-3ea80be8a7eb-serving-cert\") pod \"etcd-operator-b45778765-9r68s\" (UID: \"715ac3a3-bef6-4122-a4b0-3ea80be8a7eb\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9r68s" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.660738 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etcd-client\" (UniqueName: \"kubernetes.io/secret/715ac3a3-bef6-4122-a4b0-3ea80be8a7eb-etcd-client\") pod \"etcd-operator-b45778765-9r68s\" (UID: \"715ac3a3-bef6-4122-a4b0-3ea80be8a7eb\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9r68s" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.660855 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"control-plane-machine-set-operator-tls\" (UniqueName: \"kubernetes.io/secret/986802f7-9a9e-4191-8545-0ccda0c2bcf5-control-plane-machine-set-operator-tls\") pod \"control-plane-machine-set-operator-78cbb6b69f-rln4j\" (UID: \"986802f7-9a9e-4191-8545-0ccda0c2bcf5\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-rln4j" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.661324 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xhlvk\" (UniqueName: \"kubernetes.io/projected/55a68700-c0f7-4ae5-a252-013c8fbab545-kube-api-access-xhlvk\") pod \"image-pruner-29401920-nz8v2\" (UID: \"55a68700-c0f7-4ae5-a252-013c8fbab545\") " pod="openshift-image-registry/image-pruner-29401920-nz8v2" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.661825 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume 
\"package-server-manager-serving-cert\" (UniqueName: \"kubernetes.io/secret/c05d440a-597d-4b70-9408-2b3143be393d-package-server-manager-serving-cert\") pod \"package-server-manager-789f6589d5-fgpzs\" (UID: \"c05d440a-597d-4b70-9408-2b3143be393d\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fgpzs" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.681894 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8f5px\" (UniqueName: \"kubernetes.io/projected/42c80491-a1f7-4f1f-b2a0-db682fbc490e-kube-api-access-8f5px\") pod \"route-controller-manager-6576b87f9c-rlkpg\" (UID: \"42c80491-a1f7-4f1f-b2a0-db682fbc490e\") " pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rlkpg" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.702711 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6nmnh\" (UniqueName: \"kubernetes.io/projected/3a569b99-f211-4204-a526-314e066143c2-kube-api-access-6nmnh\") pod \"controller-manager-879f6c89f-5mx4v\" (UID: \"3a569b99-f211-4204-a526-314e066143c2\") " pod="openshift-controller-manager/controller-manager-879f6c89f-5mx4v" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.707479 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rlkpg" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.721084 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f5k8q\" (UniqueName: \"kubernetes.io/projected/a1910aeb-3a24-4922-ada0-a47d871c0998-kube-api-access-f5k8q\") pod \"apiserver-76f77b778f-8fcp4\" (UID: \"a1910aeb-3a24-4922-ada0-a47d871c0998\") " pod="openshift-apiserver/apiserver-76f77b778f-8fcp4" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.741292 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-md7bb\" (UniqueName: \"kubernetes.io/projected/d2c99872-a302-4599-96b7-2320051f069c-kube-api-access-md7bb\") pod \"machine-api-operator-5694c8668f-pkmj5\" (UID: \"d2c99872-a302-4599-96b7-2320051f069c\") " pod="openshift-machine-api/machine-api-operator-5694c8668f-pkmj5" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.747858 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.753611 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/machine-api-operator-5694c8668f-pkmj5" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.762793 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver/apiserver-76f77b778f-8fcp4" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.768062 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.784996 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-pruner-29401920-nz8v2" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.788956 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.799474 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-5mx4v" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.808991 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.831309 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.847272 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.870812 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.890498 4875 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.910750 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.930694 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.950198 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.965457 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-rlkpg"] Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.966516 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver/apiserver-76f77b778f-8fcp4"] Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.969493 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/machine-api-operator-5694c8668f-pkmj5"] Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.970094 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.984181 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-pruner-29401920-nz8v2"] Nov 26 00:09:28 crc kubenswrapper[4875]: I1126 00:09:28.987705 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 26 00:09:28 crc kubenswrapper[4875]: W1126 00:09:28.992582 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod55a68700_c0f7_4ae5_a252_013c8fbab545.slice/crio-f1c3f616f121bbba82e5e4fd3c5324139ca7decef01a101bc01a7282016ba4af WatchSource:0}: Error finding container f1c3f616f121bbba82e5e4fd3c5324139ca7decef01a101bc01a7282016ba4af: Status 404 returned error can't find the container with id 
f1c3f616f121bbba82e5e4fd3c5324139ca7decef01a101bc01a7282016ba4af Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.006489 4875 request.go:700] Waited for 1.900186792s due to client-side throttling, not priority and fairness, request: GET:https://api-int.crc.testing:6443/api/v1/namespaces/openshift-ingress-canary/configmaps?fieldSelector=metadata.name%3Dopenshift-service-ca.crt&limit=500&resourceVersion=0 Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.007817 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.013428 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-5mx4v"] Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.028335 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 26 00:09:29 crc kubenswrapper[4875]: W1126 00:09:29.040688 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3a569b99_f211_4204_a526_314e066143c2.slice/crio-c02e6919499bc192766025724d7a4be9e79853133112784067578dcbabb000ca WatchSource:0}: Error finding container c02e6919499bc192766025724d7a4be9e79853133112784067578dcbabb000ca: Status 404 returned error can't find the container with id c02e6919499bc192766025724d7a4be9e79853133112784067578dcbabb000ca Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.048142 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.083187 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wkrv9\" (UniqueName: \"kubernetes.io/projected/4349f16a-1b36-45b5-aa93-f982bcaa781f-kube-api-access-wkrv9\") pod \"console-f9d7485db-l4gfb\" (UID: \"4349f16a-1b36-45b5-aa93-f982bcaa781f\") " pod="openshift-console/console-f9d7485db-l4gfb" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.108066 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-knmtn\" (UniqueName: \"kubernetes.io/projected/83b8aa62-fa25-4831-8fba-c08bb0887d14-kube-api-access-knmtn\") pod \"service-ca-9c57cc56f-pq492\" (UID: \"83b8aa62-fa25-4831-8fba-c08bb0887d14\") " pod="openshift-service-ca/service-ca-9c57cc56f-pq492" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.123658 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-crbdp\" (UniqueName: \"kubernetes.io/projected/b226d68e-dbae-401a-88d4-ccc0cb1a51cf-kube-api-access-crbdp\") pod \"router-default-5444994796-bplfw\" (UID: \"b226d68e-dbae-401a-88d4-ccc0cb1a51cf\") " pod="openshift-ingress/router-default-5444994796-bplfw" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.141433 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pqcsj\" (UniqueName: \"kubernetes.io/projected/28e618ae-43ac-4fc4-83c2-28462346e2a9-kube-api-access-pqcsj\") pod \"machine-approver-56656f9798-7dk2l\" (UID: \"28e618ae-43ac-4fc4-83c2-28462346e2a9\") " pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7dk2l" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.165390 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tzzsx\" (UniqueName: 
\"kubernetes.io/projected/0c1975b8-52f8-4b58-a5f3-cc74156dcd75-kube-api-access-tzzsx\") pod \"packageserver-d55dfcdfc-t2hxd\" (UID: \"0c1975b8-52f8-4b58-a5f3-cc74156dcd75\") " pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t2hxd" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.172252 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-5mx4v" event={"ID":"3a569b99-f211-4204-a526-314e066143c2","Type":"ContainerStarted","Data":"c02e6919499bc192766025724d7a4be9e79853133112784067578dcbabb000ca"} Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.173069 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-8fcp4" event={"ID":"a1910aeb-3a24-4922-ada0-a47d871c0998","Type":"ContainerStarted","Data":"b06e8fb36ca885cce5b582e9449a6ca71773dcf35a8b5a5421278dd836493656"} Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.173788 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-pruner-29401920-nz8v2" event={"ID":"55a68700-c0f7-4ae5-a252-013c8fbab545","Type":"ContainerStarted","Data":"f1c3f616f121bbba82e5e4fd3c5324139ca7decef01a101bc01a7282016ba4af"} Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.174437 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-pkmj5" event={"ID":"d2c99872-a302-4599-96b7-2320051f069c","Type":"ContainerStarted","Data":"bf18ce82d6bd86d7bc91adfba3211cf2d9a188d6aee191ea17081e874169f1fe"} Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.175070 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rlkpg" event={"ID":"42c80491-a1f7-4f1f-b2a0-db682fbc490e","Type":"ContainerStarted","Data":"9f15e095da8fc9188d7c6ca904d3791cf079efcb5d2e00c579b980e05aa20b7e"} Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.184405 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wfg4q\" (UniqueName: \"kubernetes.io/projected/1167d13e-be01-4849-a6e8-0b5f505a0caa-kube-api-access-wfg4q\") pod \"openshift-controller-manager-operator-756b6f6bc6-bbsj7\" (UID: \"1167d13e-be01-4849-a6e8-0b5f505a0caa\") " pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bbsj7" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.204319 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rzlfd\" (UniqueName: \"kubernetes.io/projected/f415abb0-de0a-477d-96c0-31efea9964e2-kube-api-access-rzlfd\") pod \"openshift-apiserver-operator-796bbdcf4f-mgkmk\" (UID: \"f415abb0-de0a-477d-96c0-31efea9964e2\") " pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-mgkmk" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.220226 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/baf3ba36-cf4a-4780-b954-913da501aab7-bound-sa-token\") pod \"ingress-operator-5b745b69d9-gd8c4\" (UID: \"baf3ba36-cf4a-4780-b954-913da501aab7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-gd8c4" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.221990 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-ingress/router-default-5444994796-bplfw" Nov 26 00:09:29 crc kubenswrapper[4875]: W1126 00:09:29.234087 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb226d68e_dbae_401a_88d4_ccc0cb1a51cf.slice/crio-dfbba5a82bdbb0f24466997f2bf6bb49361860cb0baf24192baedc55e71b098e WatchSource:0}: Error finding container dfbba5a82bdbb0f24466997f2bf6bb49361860cb0baf24192baedc55e71b098e: Status 404 returned error can't find the container with id dfbba5a82bdbb0f24466997f2bf6bb49361860cb0baf24192baedc55e71b098e Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.241563 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/1eeb4a6d-f2da-4006-aa3a-37f3adb4937b-kube-api-access\") pod \"kube-controller-manager-operator-78b949d7b-gw6b8\" (UID: \"1eeb4a6d-f2da-4006-aa3a-37f3adb4937b\") " pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gw6b8" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.259094 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console/console-f9d7485db-l4gfb" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.261148 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-59t7w\" (UniqueName: \"kubernetes.io/projected/715ac3a3-bef6-4122-a4b0-3ea80be8a7eb-kube-api-access-59t7w\") pod \"etcd-operator-b45778765-9r68s\" (UID: \"715ac3a3-bef6-4122-a4b0-3ea80be8a7eb\") " pod="openshift-etcd-operator/etcd-operator-b45778765-9r68s" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.278507 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gw6b8" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.281469 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/0dc6d7a2-494d-462d-8972-de55d1e36f31-bound-sa-token\") pod \"cluster-image-registry-operator-dc59b4c8b-54wbh\" (UID: \"0dc6d7a2-494d-462d-8972-de55d1e36f31\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-54wbh" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.297538 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bbsj7" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.302570 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hvm5c\" (UniqueName: \"kubernetes.io/projected/8f3c50f0-6fff-4f1c-a0b6-f6756823da65-kube-api-access-hvm5c\") pod \"authentication-operator-69f744f599-55rwl\" (UID: \"8f3c50f0-6fff-4f1c-a0b6-f6756823da65\") " pod="openshift-authentication-operator/authentication-operator-69f744f599-55rwl" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.326790 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2n24j\" (UniqueName: \"kubernetes.io/projected/d04a012b-fcc1-4f75-8e0a-4a4a17d9cf75-kube-api-access-2n24j\") pod \"catalog-operator-68c6474976-62q8r\" (UID: \"d04a012b-fcc1-4f75-8e0a-4a4a17d9cf75\") " pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-62q8r" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.352722 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dzsn6\" (UniqueName: \"kubernetes.io/projected/0dc6d7a2-494d-462d-8972-de55d1e36f31-kube-api-access-dzsn6\") pod \"cluster-image-registry-operator-dc59b4c8b-54wbh\" (UID: \"0dc6d7a2-494d-462d-8972-de55d1e36f31\") " pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-54wbh" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.365524 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jql29\" (UniqueName: \"kubernetes.io/projected/c05d440a-597d-4b70-9408-2b3143be393d-kube-api-access-jql29\") pod \"package-server-manager-789f6589d5-fgpzs\" (UID: \"c05d440a-597d-4b70-9408-2b3143be393d\") " pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fgpzs" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.374093 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t2hxd" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.384959 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w7xd4\" (UniqueName: \"kubernetes.io/projected/5e0f9c3c-fb3f-463b-a69c-57fc40cf2f36-kube-api-access-w7xd4\") pod \"machine-config-server-nt2m7\" (UID: \"5e0f9c3c-fb3f-463b-a69c-57fc40cf2f36\") " pod="openshift-machine-config-operator/machine-config-server-nt2m7" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.396812 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca/service-ca-9c57cc56f-pq492" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.401989 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-etcd-operator/etcd-operator-b45778765-9r68s" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.403991 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bm8dl\" (UniqueName: \"kubernetes.io/projected/d752aecd-9a39-430c-9262-c81434b3f08a-kube-api-access-bm8dl\") pod \"kube-storage-version-migrator-operator-b67b599dd-xfnw9\" (UID: \"d752aecd-9a39-430c-9262-c81434b3f08a\") " pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xfnw9" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.405842 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7dk2l" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.424767 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fgpzs" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.425661 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-2dj2t\" (UniqueName: \"kubernetes.io/projected/b118a730-fb77-4632-83f3-9b94b4d52b2c-kube-api-access-2dj2t\") pod \"downloads-7954f5f757-7s69q\" (UID: \"b118a730-fb77-4632-83f3-9b94b4d52b2c\") " pod="openshift-console/downloads-7954f5f757-7s69q" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.448856 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4xwpm\" (UniqueName: \"kubernetes.io/projected/baf3ba36-cf4a-4780-b954-913da501aab7-kube-api-access-4xwpm\") pod \"ingress-operator-5b745b69d9-gd8c4\" (UID: \"baf3ba36-cf4a-4780-b954-913da501aab7\") " pod="openshift-ingress-operator/ingress-operator-5b745b69d9-gd8c4" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.456206 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/console-f9d7485db-l4gfb"] Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.469514 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication-operator/authentication-operator-69f744f599-55rwl" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.469938 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-v825q\" (UniqueName: \"kubernetes.io/projected/1da5021c-2176-41cb-9c3c-d4ccbe299664-kube-api-access-v825q\") pod \"migrator-59844c95c7-qxqtr\" (UID: \"1da5021c-2176-41cb-9c3c-d4ccbe299664\") " pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-qxqtr" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.478185 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-mgkmk" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.491016 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/24b77217-7ebc-4165-a7b5-d7171adbc29a-kube-api-access\") pod \"openshift-kube-scheduler-operator-5fdd9b5758-8kdf7\" (UID: \"24b77217-7ebc-4165-a7b5-d7171adbc29a\") " pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-8kdf7" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.505660 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gw6b8"] Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.507578 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9pgq7\" (UniqueName: \"kubernetes.io/projected/49bb8ae9-ab85-458a-850e-8d3b48ab42de-kube-api-access-9pgq7\") pod \"openshift-config-operator-7777fb866f-zww4t\" (UID: \"49bb8ae9-ab85-458a-850e-8d3b48ab42de\") " pod="openshift-config-operator/openshift-config-operator-7777fb866f-zww4t" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.514532 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-console/downloads-7954f5f757-7s69q" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.530713 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-54wbh" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.530989 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-76jc9\" (UniqueName: \"kubernetes.io/projected/85ee0261-b699-4159-9ee7-123628b1f1c0-kube-api-access-76jc9\") pod \"machine-config-operator-74547568cd-bsvcq\" (UID: \"85ee0261-b699-4159-9ee7-123628b1f1c0\") " pod="openshift-machine-config-operator/machine-config-operator-74547568cd-bsvcq" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.549463 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xfdxj\" (UniqueName: \"kubernetes.io/projected/2f69fc9c-de93-4cae-a491-50698ac10599-kube-api-access-xfdxj\") pod \"apiserver-7bbb656c7d-l4vms\" (UID: \"2f69fc9c-de93-4cae-a491-50698ac10599\") " pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-l4vms" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.576315 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5cnhn\" (UniqueName: \"kubernetes.io/projected/986802f7-9a9e-4191-8545-0ccda0c2bcf5-kube-api-access-5cnhn\") pod \"control-plane-machine-set-operator-78cbb6b69f-rln4j\" (UID: \"986802f7-9a9e-4191-8545-0ccda0c2bcf5\") " pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-rln4j" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.581545 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xfnw9" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.588521 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9djpq\" (UniqueName: \"kubernetes.io/projected/b0ac0929-e6e0-464c-bb52-aadcea8d2f3e-kube-api-access-9djpq\") pod \"machine-config-controller-84d6567774-mpgjt\" (UID: \"b0ac0929-e6e0-464c-bb52-aadcea8d2f3e\") " pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mpgjt" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.604674 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-gd8c4" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.630385 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t2hxd"] Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.637611 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-qxqtr" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.637833 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-62q8r" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.644087 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mpgjt" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.659631 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-8kdf7" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.668834 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/9d929481-cfc4-4d91-b925-ac3a9127e007-srv-cert\") pod \"olm-operator-6b444d44fb-dbctw\" (UID: \"9d929481-cfc4-4d91-b925-ac3a9127e007\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dbctw" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.668870 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/9d929481-cfc4-4d91-b925-ac3a9127e007-profile-collector-cert\") pod \"olm-operator-6b444d44fb-dbctw\" (UID: \"9d929481-cfc4-4d91-b925-ac3a9127e007\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dbctw" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.668888 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-v9znd\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.668906 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/35ee6c04-3028-4b06-ac2b-6a26a2f2e147-installation-pull-secrets\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.668920 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-v9znd\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.668939 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/872d7004-c1ed-4411-bfcc-a50cfad48c96-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-dgkwj\" (UID: \"872d7004-c1ed-4411-bfcc-a50cfad48c96\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-dgkwj" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.668956 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2d2fae84-4466-4ef1-af30-b6df2e28f931-config\") pod \"console-operator-58897d9998-w9htq\" (UID: \"2d2fae84-4466-4ef1-af30-b6df2e28f931\") " pod="openshift-console-operator/console-operator-58897d9998-w9htq" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.668982 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2d2fae84-4466-4ef1-af30-b6df2e28f931-serving-cert\") pod \"console-operator-58897d9998-w9htq\" (UID: 
\"2d2fae84-4466-4ef1-af30-b6df2e28f931\") " pod="openshift-console-operator/console-operator-58897d9998-w9htq" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.668996 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-v9znd\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.669013 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-v9znd\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.669029 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/35ee6c04-3028-4b06-ac2b-6a26a2f2e147-bound-sa-token\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.669046 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-v9znd\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.669062 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/35ee6c04-3028-4b06-ac2b-6a26a2f2e147-registry-tls\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.669076 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-v9znd\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.669091 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-v9znd\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.669105 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: 
\"kubernetes.io/empty-dir/35ee6c04-3028-4b06-ac2b-6a26a2f2e147-ca-trust-extracted\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.669120 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-v9znd\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.669138 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-v9znd\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.669154 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/35ee6c04-3028-4b06-ac2b-6a26a2f2e147-trusted-ca\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.669167 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d725db0e-68dc-4655-ae0f-8e847ef651c3-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-6ml5v\" (UID: \"d725db0e-68dc-4655-ae0f-8e847ef651c3\") " pod="openshift-marketplace/marketplace-operator-79b997595-6ml5v" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.669182 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2d2fae84-4466-4ef1-af30-b6df2e28f931-trusted-ca\") pod \"console-operator-58897d9998-w9htq\" (UID: \"2d2fae84-4466-4ef1-af30-b6df2e28f931\") " pod="openshift-console-operator/console-operator-58897d9998-w9htq" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.669198 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-v9znd\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.669212 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f4dwr\" (UniqueName: \"kubernetes.io/projected/0a3941af-01d7-44c0-8e51-cc719e7f4835-kube-api-access-f4dwr\") pod \"oauth-openshift-558db77b4-v9znd\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.669227 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume 
\"kube-api-access-pc2vl\" (UniqueName: \"kubernetes.io/projected/2d2fae84-4466-4ef1-af30-b6df2e28f931-kube-api-access-pc2vl\") pod \"console-operator-58897d9998-w9htq\" (UID: \"2d2fae84-4466-4ef1-af30-b6df2e28f931\") " pod="openshift-console-operator/console-operator-58897d9998-w9htq" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.669249 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/0a3941af-01d7-44c0-8e51-cc719e7f4835-audit-policies\") pod \"oauth-openshift-558db77b4-v9znd\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.669265 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/0a3941af-01d7-44c0-8e51-cc719e7f4835-audit-dir\") pod \"oauth-openshift-558db77b4-v9znd\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.669282 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rx4gr\" (UniqueName: \"kubernetes.io/projected/872d7004-c1ed-4411-bfcc-a50cfad48c96-kube-api-access-rx4gr\") pod \"cluster-samples-operator-665b6dd947-dgkwj\" (UID: \"872d7004-c1ed-4411-bfcc-a50cfad48c96\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-dgkwj" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.669296 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-t7rld\" (UniqueName: \"kubernetes.io/projected/9d929481-cfc4-4d91-b925-ac3a9127e007-kube-api-access-t7rld\") pod \"olm-operator-6b444d44fb-dbctw\" (UID: \"9d929481-cfc4-4d91-b925-ac3a9127e007\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dbctw" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.669317 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.669334 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/d725db0e-68dc-4655-ae0f-8e847ef651c3-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-6ml5v\" (UID: \"d725db0e-68dc-4655-ae0f-8e847ef651c3\") " pod="openshift-marketplace/marketplace-operator-79b997595-6ml5v" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.669355 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-58bm2\" (UniqueName: \"kubernetes.io/projected/35ee6c04-3028-4b06-ac2b-6a26a2f2e147-kube-api-access-58bm2\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.669378 4875 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-v9znd\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.669394 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/84a8202a-8464-4308-8ecb-dfb811933d5b-metrics-tls\") pod \"dns-operator-744455d44c-8t466\" (UID: \"84a8202a-8464-4308-8ecb-dfb811933d5b\") " pod="openshift-dns-operator/dns-operator-744455d44c-8t466" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.669425 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/35ee6c04-3028-4b06-ac2b-6a26a2f2e147-registry-certificates\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.669441 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-zz5pt\" (UniqueName: \"kubernetes.io/projected/84a8202a-8464-4308-8ecb-dfb811933d5b-kube-api-access-zz5pt\") pod \"dns-operator-744455d44c-8t466\" (UID: \"84a8202a-8464-4308-8ecb-dfb811933d5b\") " pod="openshift-dns-operator/dns-operator-744455d44c-8t466" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.669454 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tv45p\" (UniqueName: \"kubernetes.io/projected/d725db0e-68dc-4655-ae0f-8e847ef651c3-kube-api-access-tv45p\") pod \"marketplace-operator-79b997595-6ml5v\" (UID: \"d725db0e-68dc-4655-ae0f-8e847ef651c3\") " pod="openshift-marketplace/marketplace-operator-79b997595-6ml5v" Nov 26 00:09:29 crc kubenswrapper[4875]: E1126 00:09:29.669841 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:30.169830738 +0000 UTC m=+143.359806433 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.690300 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-config-operator/machine-config-server-nt2m7" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.708361 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-rln4j" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.716710 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-bsvcq" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.732263 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fgpzs"] Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.750509 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-l4vms" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.758917 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-config-operator/openshift-config-operator-7777fb866f-zww4t" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.770751 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.771872 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/d725db0e-68dc-4655-ae0f-8e847ef651c3-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-6ml5v\" (UID: \"d725db0e-68dc-4655-ae0f-8e847ef651c3\") " pod="openshift-marketplace/marketplace-operator-79b997595-6ml5v" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.771927 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/b988074b-ff7d-4921-b9a7-bda74319fcc9-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-tfknb\" (UID: \"b988074b-ff7d-4921-b9a7-bda74319fcc9\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-tfknb" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.771968 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-58bm2\" (UniqueName: \"kubernetes.io/projected/35ee6c04-3028-4b06-ac2b-6a26a2f2e147-kube-api-access-58bm2\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.772047 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bce1a18d-ad14-442b-afb3-78b1c7213379-config\") pod \"kube-apiserver-operator-766d6c64bb-tkvbn\" (UID: \"bce1a18d-ad14-442b-afb3-78b1c7213379\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-tkvbn" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.772139 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/601c8445-3908-4677-9df9-422f8e97d928-config-volume\") pod \"collect-profiles-29401920-pfkvn\" (UID: \"601c8445-3908-4677-9df9-422f8e97d928\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401920-pfkvn" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.772194 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"mountpoint-dir\" (UniqueName: 
\"kubernetes.io/host-path/f31ac9d7-e1d6-413f-8749-4f3f13be75c9-mountpoint-dir\") pod \"csi-hostpathplugin-wlmp9\" (UID: \"f31ac9d7-e1d6-413f-8749-4f3f13be75c9\") " pod="hostpath-provisioner/csi-hostpathplugin-wlmp9" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.772290 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-v9znd\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.772378 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/35ee6c04-3028-4b06-ac2b-6a26a2f2e147-registry-certificates\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.772761 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/84a8202a-8464-4308-8ecb-dfb811933d5b-metrics-tls\") pod \"dns-operator-744455d44c-8t466\" (UID: \"84a8202a-8464-4308-8ecb-dfb811933d5b\") " pod="openshift-dns-operator/dns-operator-744455d44c-8t466" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.772863 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dd4dda6b-c6c4-4716-80b2-95ce65914125-config\") pod \"service-ca-operator-777779d784-btrqx\" (UID: \"dd4dda6b-c6c4-4716-80b2-95ce65914125\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-btrqx" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.772918 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-zz5pt\" (UniqueName: \"kubernetes.io/projected/84a8202a-8464-4308-8ecb-dfb811933d5b-kube-api-access-zz5pt\") pod \"dns-operator-744455d44c-8t466\" (UID: \"84a8202a-8464-4308-8ecb-dfb811933d5b\") " pod="openshift-dns-operator/dns-operator-744455d44c-8t466" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.772938 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tv45p\" (UniqueName: \"kubernetes.io/projected/d725db0e-68dc-4655-ae0f-8e847ef651c3-kube-api-access-tv45p\") pod \"marketplace-operator-79b997595-6ml5v\" (UID: \"d725db0e-68dc-4655-ae0f-8e847ef651c3\") " pod="openshift-marketplace/marketplace-operator-79b997595-6ml5v" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.773001 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/601c8445-3908-4677-9df9-422f8e97d928-secret-volume\") pod \"collect-profiles-29401920-pfkvn\" (UID: \"601c8445-3908-4677-9df9-422f8e97d928\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401920-pfkvn" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.773161 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bce1a18d-ad14-442b-afb3-78b1c7213379-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-tkvbn\" (UID: \"bce1a18d-ad14-442b-afb3-78b1c7213379\") 
" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-tkvbn" Nov 26 00:09:29 crc kubenswrapper[4875]: E1126 00:09:29.773371 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:30.27334672 +0000 UTC m=+143.463322475 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.773452 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/9d929481-cfc4-4d91-b925-ac3a9127e007-srv-cert\") pod \"olm-operator-6b444d44fb-dbctw\" (UID: \"9d929481-cfc4-4d91-b925-ac3a9127e007\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dbctw" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.773503 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-x9trx\" (UniqueName: \"kubernetes.io/projected/f31ac9d7-e1d6-413f-8749-4f3f13be75c9-kube-api-access-x9trx\") pod \"csi-hostpathplugin-wlmp9\" (UID: \"f31ac9d7-e1d6-413f-8749-4f3f13be75c9\") " pod="hostpath-provisioner/csi-hostpathplugin-wlmp9" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.773796 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/9d929481-cfc4-4d91-b925-ac3a9127e007-profile-collector-cert\") pod \"olm-operator-6b444d44fb-dbctw\" (UID: \"9d929481-cfc4-4d91-b925-ac3a9127e007\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dbctw" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.773875 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/f31ac9d7-e1d6-413f-8749-4f3f13be75c9-csi-data-dir\") pod \"csi-hostpathplugin-wlmp9\" (UID: \"f31ac9d7-e1d6-413f-8749-4f3f13be75c9\") " pod="hostpath-provisioner/csi-hostpathplugin-wlmp9" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.773971 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-v9znd\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.774029 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-v9znd\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.774100 4875 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/35ee6c04-3028-4b06-ac2b-6a26a2f2e147-installation-pull-secrets\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.774129 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/f31ac9d7-e1d6-413f-8749-4f3f13be75c9-plugins-dir\") pod \"csi-hostpathplugin-wlmp9\" (UID: \"f31ac9d7-e1d6-413f-8749-4f3f13be75c9\") " pod="hostpath-provisioner/csi-hostpathplugin-wlmp9" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.774282 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bce1a18d-ad14-442b-afb3-78b1c7213379-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-tkvbn\" (UID: \"bce1a18d-ad14-442b-afb3-78b1c7213379\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-tkvbn" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.774304 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dd4dda6b-c6c4-4716-80b2-95ce65914125-serving-cert\") pod \"service-ca-operator-777779d784-btrqx\" (UID: \"dd4dda6b-c6c4-4716-80b2-95ce65914125\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-btrqx" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.774358 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/872d7004-c1ed-4411-bfcc-a50cfad48c96-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-dgkwj\" (UID: \"872d7004-c1ed-4411-bfcc-a50cfad48c96\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-dgkwj" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.774455 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vksp5\" (UniqueName: \"kubernetes.io/projected/dd4dda6b-c6c4-4716-80b2-95ce65914125-kube-api-access-vksp5\") pod \"service-ca-operator-777779d784-btrqx\" (UID: \"dd4dda6b-c6c4-4716-80b2-95ce65914125\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-btrqx" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.774546 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2d2fae84-4466-4ef1-af30-b6df2e28f931-config\") pod \"console-operator-58897d9998-w9htq\" (UID: \"2d2fae84-4466-4ef1-af30-b6df2e28f931\") " pod="openshift-console-operator/console-operator-58897d9998-w9htq" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.774710 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jw66c\" (UniqueName: \"kubernetes.io/projected/b988074b-ff7d-4921-b9a7-bda74319fcc9-kube-api-access-jw66c\") pod \"multus-admission-controller-857f4d67dd-tfknb\" (UID: \"b988074b-ff7d-4921-b9a7-bda74319fcc9\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-tfknb" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.774869 4875 reconciler_common.go:218] "operationExecutor.MountVolume 
started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2d2fae84-4466-4ef1-af30-b6df2e28f931-serving-cert\") pod \"console-operator-58897d9998-w9htq\" (UID: \"2d2fae84-4466-4ef1-af30-b6df2e28f931\") " pod="openshift-console-operator/console-operator-58897d9998-w9htq" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.775023 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-v9znd\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.775090 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-v9znd\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.775131 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/888a01e1-690c-41d1-bcc2-1f1403a205dd-metrics-tls\") pod \"dns-default-j5qr7\" (UID: \"888a01e1-690c-41d1-bcc2-1f1403a205dd\") " pod="openshift-dns/dns-default-j5qr7" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.775153 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-v9znd\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.775209 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/35ee6c04-3028-4b06-ac2b-6a26a2f2e147-bound-sa-token\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.775231 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f22qb\" (UniqueName: \"kubernetes.io/projected/6cdd823a-c12a-427c-8273-924e5295319c-kube-api-access-f22qb\") pod \"ingress-canary-d5dcs\" (UID: \"6cdd823a-c12a-427c-8273-924e5295319c\") " pod="openshift-ingress-canary/ingress-canary-d5dcs" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.775267 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/35ee6c04-3028-4b06-ac2b-6a26a2f2e147-registry-tls\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.775291 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-system-cliconfig\") pod 
\"oauth-openshift-558db77b4-v9znd\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.775314 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/f31ac9d7-e1d6-413f-8749-4f3f13be75c9-registration-dir\") pod \"csi-hostpathplugin-wlmp9\" (UID: \"f31ac9d7-e1d6-413f-8749-4f3f13be75c9\") " pod="hostpath-provisioner/csi-hostpathplugin-wlmp9" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.775350 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-v9znd\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.775374 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/35ee6c04-3028-4b06-ac2b-6a26a2f2e147-ca-trust-extracted\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.775486 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-v9znd\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.775531 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-v9znd\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.775569 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d725db0e-68dc-4655-ae0f-8e847ef651c3-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-6ml5v\" (UID: \"d725db0e-68dc-4655-ae0f-8e847ef651c3\") " pod="openshift-marketplace/marketplace-operator-79b997595-6ml5v" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.775593 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2d2fae84-4466-4ef1-af30-b6df2e28f931-trusted-ca\") pod \"console-operator-58897d9998-w9htq\" (UID: \"2d2fae84-4466-4ef1-af30-b6df2e28f931\") " pod="openshift-console-operator/console-operator-58897d9998-w9htq" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.775619 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-n8gzv\" (UniqueName: \"kubernetes.io/projected/601c8445-3908-4677-9df9-422f8e97d928-kube-api-access-n8gzv\") pod \"collect-profiles-29401920-pfkvn\" (UID: \"601c8445-3908-4677-9df9-422f8e97d928\") " 
pod="openshift-operator-lifecycle-manager/collect-profiles-29401920-pfkvn" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.775656 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/35ee6c04-3028-4b06-ac2b-6a26a2f2e147-trusted-ca\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.775661 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/35ee6c04-3028-4b06-ac2b-6a26a2f2e147-registry-certificates\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.775719 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/f31ac9d7-e1d6-413f-8749-4f3f13be75c9-socket-dir\") pod \"csi-hostpathplugin-wlmp9\" (UID: \"f31ac9d7-e1d6-413f-8749-4f3f13be75c9\") " pod="hostpath-provisioner/csi-hostpathplugin-wlmp9" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.775744 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/888a01e1-690c-41d1-bcc2-1f1403a205dd-config-volume\") pod \"dns-default-j5qr7\" (UID: \"888a01e1-690c-41d1-bcc2-1f1403a205dd\") " pod="openshift-dns/dns-default-j5qr7" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.775775 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-v9znd\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.775800 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f4dwr\" (UniqueName: \"kubernetes.io/projected/0a3941af-01d7-44c0-8e51-cc719e7f4835-kube-api-access-f4dwr\") pod \"oauth-openshift-558db77b4-v9znd\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.777622 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pc2vl\" (UniqueName: \"kubernetes.io/projected/2d2fae84-4466-4ef1-af30-b6df2e28f931-kube-api-access-pc2vl\") pod \"console-operator-58897d9998-w9htq\" (UID: \"2d2fae84-4466-4ef1-af30-b6df2e28f931\") " pod="openshift-console-operator/console-operator-58897d9998-w9htq" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.777701 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/0a3941af-01d7-44c0-8e51-cc719e7f4835-audit-policies\") pod \"oauth-openshift-558db77b4-v9znd\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.778091 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" 
(UniqueName: \"kubernetes.io/host-path/0a3941af-01d7-44c0-8e51-cc719e7f4835-audit-dir\") pod \"oauth-openshift-558db77b4-v9znd\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.781132 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-system-router-certs\") pod \"oauth-openshift-558db77b4-v9znd\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.782161 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-etcd-operator/etcd-operator-b45778765-9r68s"] Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.784509 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/0a3941af-01d7-44c0-8e51-cc719e7f4835-audit-dir\") pod \"oauth-openshift-558db77b4-v9znd\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.786397 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/2d2fae84-4466-4ef1-af30-b6df2e28f931-trusted-ca\") pod \"console-operator-58897d9998-w9htq\" (UID: \"2d2fae84-4466-4ef1-af30-b6df2e28f931\") " pod="openshift-console-operator/console-operator-58897d9998-w9htq" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.787174 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6cdd823a-c12a-427c-8273-924e5295319c-cert\") pod \"ingress-canary-d5dcs\" (UID: \"6cdd823a-c12a-427c-8273-924e5295319c\") " pod="openshift-ingress-canary/ingress-canary-d5dcs" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.787210 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fqntg\" (UniqueName: \"kubernetes.io/projected/888a01e1-690c-41d1-bcc2-1f1403a205dd-kube-api-access-fqntg\") pod \"dns-default-j5qr7\" (UID: \"888a01e1-690c-41d1-bcc2-1f1403a205dd\") " pod="openshift-dns/dns-default-j5qr7" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.787459 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/35ee6c04-3028-4b06-ac2b-6a26a2f2e147-ca-trust-extracted\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.787806 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/84a8202a-8464-4308-8ecb-dfb811933d5b-metrics-tls\") pod \"dns-operator-744455d44c-8t466\" (UID: \"84a8202a-8464-4308-8ecb-dfb811933d5b\") " pod="openshift-dns-operator/dns-operator-744455d44c-8t466" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.788737 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/35ee6c04-3028-4b06-ac2b-6a26a2f2e147-trusted-ca\") pod \"image-registry-697d97f7c8-8pktj\" (UID: 
\"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.788768 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-system-cliconfig\") pod \"oauth-openshift-558db77b4-v9znd\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.789455 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/0a3941af-01d7-44c0-8e51-cc719e7f4835-audit-policies\") pod \"oauth-openshift-558db77b4-v9znd\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.789972 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-558db77b4-v9znd\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.790687 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rx4gr\" (UniqueName: \"kubernetes.io/projected/872d7004-c1ed-4411-bfcc-a50cfad48c96-kube-api-access-rx4gr\") pod \"cluster-samples-operator-665b6dd947-dgkwj\" (UID: \"872d7004-c1ed-4411-bfcc-a50cfad48c96\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-dgkwj" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.790741 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-t7rld\" (UniqueName: \"kubernetes.io/projected/9d929481-cfc4-4d91-b925-ac3a9127e007-kube-api-access-t7rld\") pod \"olm-operator-6b444d44fb-dbctw\" (UID: \"9d929481-cfc4-4d91-b925-ac3a9127e007\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dbctw" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.791637 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/2d2fae84-4466-4ef1-af30-b6df2e28f931-config\") pod \"console-operator-58897d9998-w9htq\" (UID: \"2d2fae84-4466-4ef1-af30-b6df2e28f931\") " pod="openshift-console-operator/console-operator-58897d9998-w9htq" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.791737 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/d725db0e-68dc-4655-ae0f-8e847ef651c3-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-6ml5v\" (UID: \"d725db0e-68dc-4655-ae0f-8e847ef651c3\") " pod="openshift-marketplace/marketplace-operator-79b997595-6ml5v" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.791926 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-system-service-ca\") pod \"oauth-openshift-558db77b4-v9znd\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 
00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.792528 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"profile-collector-cert\" (UniqueName: \"kubernetes.io/secret/9d929481-cfc4-4d91-b925-ac3a9127e007-profile-collector-cert\") pod \"olm-operator-6b444d44fb-dbctw\" (UID: \"9d929481-cfc4-4d91-b925-ac3a9127e007\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dbctw" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.793269 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-558db77b4-v9znd\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.795585 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"samples-operator-tls\" (UniqueName: \"kubernetes.io/secret/872d7004-c1ed-4411-bfcc-a50cfad48c96-samples-operator-tls\") pod \"cluster-samples-operator-665b6dd947-dgkwj\" (UID: \"872d7004-c1ed-4411-bfcc-a50cfad48c96\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-dgkwj" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.796632 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d725db0e-68dc-4655-ae0f-8e847ef651c3-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-6ml5v\" (UID: \"d725db0e-68dc-4655-ae0f-8e847ef651c3\") " pod="openshift-marketplace/marketplace-operator-79b997595-6ml5v" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.797057 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bbsj7"] Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.797981 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-user-template-error\") pod \"oauth-openshift-558db77b4-v9znd\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.798491 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/35ee6c04-3028-4b06-ac2b-6a26a2f2e147-installation-pull-secrets\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.800380 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/35ee6c04-3028-4b06-ac2b-6a26a2f2e147-registry-tls\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.805152 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-system-serving-cert\") pod \"oauth-openshift-558db77b4-v9znd\" (UID: 
\"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.805275 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-558db77b4-v9znd\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.805552 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-system-session\") pod \"oauth-openshift-558db77b4-v9znd\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.805877 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-user-template-login\") pod \"oauth-openshift-558db77b4-v9znd\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.805941 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-58bm2\" (UniqueName: \"kubernetes.io/projected/35ee6c04-3028-4b06-ac2b-6a26a2f2e147-kube-api-access-58bm2\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.806782 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-558db77b4-v9znd\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.808709 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"srv-cert\" (UniqueName: \"kubernetes.io/secret/9d929481-cfc4-4d91-b925-ac3a9127e007-srv-cert\") pod \"olm-operator-6b444d44fb-dbctw\" (UID: \"9d929481-cfc4-4d91-b925-ac3a9127e007\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dbctw" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.822363 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/2d2fae84-4466-4ef1-af30-b6df2e28f931-serving-cert\") pod \"console-operator-58897d9998-w9htq\" (UID: \"2d2fae84-4466-4ef1-af30-b6df2e28f931\") " pod="openshift-console-operator/console-operator-58897d9998-w9htq" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.829232 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-zz5pt\" (UniqueName: \"kubernetes.io/projected/84a8202a-8464-4308-8ecb-dfb811933d5b-kube-api-access-zz5pt\") pod \"dns-operator-744455d44c-8t466\" (UID: \"84a8202a-8464-4308-8ecb-dfb811933d5b\") " pod="openshift-dns-operator/dns-operator-744455d44c-8t466" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.839174 4875 
kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-mgkmk"] Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.851886 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-tv45p\" (UniqueName: \"kubernetes.io/projected/d725db0e-68dc-4655-ae0f-8e847ef651c3-kube-api-access-tv45p\") pod \"marketplace-operator-79b997595-6ml5v\" (UID: \"d725db0e-68dc-4655-ae0f-8e847ef651c3\") " pod="openshift-marketplace/marketplace-operator-79b997595-6ml5v" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.865328 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns-operator/dns-operator-744455d44c-8t466" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.884374 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f4dwr\" (UniqueName: \"kubernetes.io/projected/0a3941af-01d7-44c0-8e51-cc719e7f4835-kube-api-access-f4dwr\") pod \"oauth-openshift-558db77b4-v9znd\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.891459 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/888a01e1-690c-41d1-bcc2-1f1403a205dd-metrics-tls\") pod \"dns-default-j5qr7\" (UID: \"888a01e1-690c-41d1-bcc2-1f1403a205dd\") " pod="openshift-dns/dns-default-j5qr7" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.891500 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f22qb\" (UniqueName: \"kubernetes.io/projected/6cdd823a-c12a-427c-8273-924e5295319c-kube-api-access-f22qb\") pod \"ingress-canary-d5dcs\" (UID: \"6cdd823a-c12a-427c-8273-924e5295319c\") " pod="openshift-ingress-canary/ingress-canary-d5dcs" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.891533 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/f31ac9d7-e1d6-413f-8749-4f3f13be75c9-registration-dir\") pod \"csi-hostpathplugin-wlmp9\" (UID: \"f31ac9d7-e1d6-413f-8749-4f3f13be75c9\") " pod="hostpath-provisioner/csi-hostpathplugin-wlmp9" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.891555 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-n8gzv\" (UniqueName: \"kubernetes.io/projected/601c8445-3908-4677-9df9-422f8e97d928-kube-api-access-n8gzv\") pod \"collect-profiles-29401920-pfkvn\" (UID: \"601c8445-3908-4677-9df9-422f8e97d928\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401920-pfkvn" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.891832 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registration-dir\" (UniqueName: \"kubernetes.io/host-path/f31ac9d7-e1d6-413f-8749-4f3f13be75c9-registration-dir\") pod \"csi-hostpathplugin-wlmp9\" (UID: \"f31ac9d7-e1d6-413f-8749-4f3f13be75c9\") " pod="hostpath-provisioner/csi-hostpathplugin-wlmp9" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.891881 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/f31ac9d7-e1d6-413f-8749-4f3f13be75c9-socket-dir\") pod \"csi-hostpathplugin-wlmp9\" (UID: \"f31ac9d7-e1d6-413f-8749-4f3f13be75c9\") " pod="hostpath-provisioner/csi-hostpathplugin-wlmp9" Nov 26 00:09:29 
crc kubenswrapper[4875]: I1126 00:09:29.891897 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/888a01e1-690c-41d1-bcc2-1f1403a205dd-config-volume\") pod \"dns-default-j5qr7\" (UID: \"888a01e1-690c-41d1-bcc2-1f1403a205dd\") " pod="openshift-dns/dns-default-j5qr7" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.892019 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"socket-dir\" (UniqueName: \"kubernetes.io/host-path/f31ac9d7-e1d6-413f-8749-4f3f13be75c9-socket-dir\") pod \"csi-hostpathplugin-wlmp9\" (UID: \"f31ac9d7-e1d6-413f-8749-4f3f13be75c9\") " pod="hostpath-provisioner/csi-hostpathplugin-wlmp9" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.892171 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6cdd823a-c12a-427c-8273-924e5295319c-cert\") pod \"ingress-canary-d5dcs\" (UID: \"6cdd823a-c12a-427c-8273-924e5295319c\") " pod="openshift-ingress-canary/ingress-canary-d5dcs" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.892195 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fqntg\" (UniqueName: \"kubernetes.io/projected/888a01e1-690c-41d1-bcc2-1f1403a205dd-kube-api-access-fqntg\") pod \"dns-default-j5qr7\" (UID: \"888a01e1-690c-41d1-bcc2-1f1403a205dd\") " pod="openshift-dns/dns-default-j5qr7" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.892532 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.892561 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/b988074b-ff7d-4921-b9a7-bda74319fcc9-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-tfknb\" (UID: \"b988074b-ff7d-4921-b9a7-bda74319fcc9\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-tfknb" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.892577 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/bce1a18d-ad14-442b-afb3-78b1c7213379-config\") pod \"kube-apiserver-operator-766d6c64bb-tkvbn\" (UID: \"bce1a18d-ad14-442b-afb3-78b1c7213379\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-tkvbn" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.892606 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/601c8445-3908-4677-9df9-422f8e97d928-config-volume\") pod \"collect-profiles-29401920-pfkvn\" (UID: \"601c8445-3908-4677-9df9-422f8e97d928\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401920-pfkvn" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.892624 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/f31ac9d7-e1d6-413f-8749-4f3f13be75c9-mountpoint-dir\") pod \"csi-hostpathplugin-wlmp9\" (UID: \"f31ac9d7-e1d6-413f-8749-4f3f13be75c9\") " 
pod="hostpath-provisioner/csi-hostpathplugin-wlmp9" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.892645 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dd4dda6b-c6c4-4716-80b2-95ce65914125-config\") pod \"service-ca-operator-777779d784-btrqx\" (UID: \"dd4dda6b-c6c4-4716-80b2-95ce65914125\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-btrqx" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.892663 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/601c8445-3908-4677-9df9-422f8e97d928-secret-volume\") pod \"collect-profiles-29401920-pfkvn\" (UID: \"601c8445-3908-4677-9df9-422f8e97d928\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401920-pfkvn" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.892680 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bce1a18d-ad14-442b-afb3-78b1c7213379-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-tkvbn\" (UID: \"bce1a18d-ad14-442b-afb3-78b1c7213379\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-tkvbn" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.892696 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-x9trx\" (UniqueName: \"kubernetes.io/projected/f31ac9d7-e1d6-413f-8749-4f3f13be75c9-kube-api-access-x9trx\") pod \"csi-hostpathplugin-wlmp9\" (UID: \"f31ac9d7-e1d6-413f-8749-4f3f13be75c9\") " pod="hostpath-provisioner/csi-hostpathplugin-wlmp9" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.892717 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/f31ac9d7-e1d6-413f-8749-4f3f13be75c9-csi-data-dir\") pod \"csi-hostpathplugin-wlmp9\" (UID: \"f31ac9d7-e1d6-413f-8749-4f3f13be75c9\") " pod="hostpath-provisioner/csi-hostpathplugin-wlmp9" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.892733 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/f31ac9d7-e1d6-413f-8749-4f3f13be75c9-plugins-dir\") pod \"csi-hostpathplugin-wlmp9\" (UID: \"f31ac9d7-e1d6-413f-8749-4f3f13be75c9\") " pod="hostpath-provisioner/csi-hostpathplugin-wlmp9" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.892752 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/888a01e1-690c-41d1-bcc2-1f1403a205dd-config-volume\") pod \"dns-default-j5qr7\" (UID: \"888a01e1-690c-41d1-bcc2-1f1403a205dd\") " pod="openshift-dns/dns-default-j5qr7" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.892760 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bce1a18d-ad14-442b-afb3-78b1c7213379-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-tkvbn\" (UID: \"bce1a18d-ad14-442b-afb3-78b1c7213379\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-tkvbn" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.892863 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dd4dda6b-c6c4-4716-80b2-95ce65914125-serving-cert\") pod 
\"service-ca-operator-777779d784-btrqx\" (UID: \"dd4dda6b-c6c4-4716-80b2-95ce65914125\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-btrqx" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.892894 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vksp5\" (UniqueName: \"kubernetes.io/projected/dd4dda6b-c6c4-4716-80b2-95ce65914125-kube-api-access-vksp5\") pod \"service-ca-operator-777779d784-btrqx\" (UID: \"dd4dda6b-c6c4-4716-80b2-95ce65914125\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-btrqx" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.892934 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jw66c\" (UniqueName: \"kubernetes.io/projected/b988074b-ff7d-4921-b9a7-bda74319fcc9-kube-api-access-jw66c\") pod \"multus-admission-controller-857f4d67dd-tfknb\" (UID: \"b988074b-ff7d-4921-b9a7-bda74319fcc9\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-tfknb" Nov 26 00:09:29 crc kubenswrapper[4875]: E1126 00:09:29.893007 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:30.392963708 +0000 UTC m=+143.582939393 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.893495 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"csi-data-dir\" (UniqueName: \"kubernetes.io/host-path/f31ac9d7-e1d6-413f-8749-4f3f13be75c9-csi-data-dir\") pod \"csi-hostpathplugin-wlmp9\" (UID: \"f31ac9d7-e1d6-413f-8749-4f3f13be75c9\") " pod="hostpath-provisioner/csi-hostpathplugin-wlmp9" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.893578 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"mountpoint-dir\" (UniqueName: \"kubernetes.io/host-path/f31ac9d7-e1d6-413f-8749-4f3f13be75c9-mountpoint-dir\") pod \"csi-hostpathplugin-wlmp9\" (UID: \"f31ac9d7-e1d6-413f-8749-4f3f13be75c9\") " pod="hostpath-provisioner/csi-hostpathplugin-wlmp9" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.893633 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"plugins-dir\" (UniqueName: \"kubernetes.io/host-path/f31ac9d7-e1d6-413f-8749-4f3f13be75c9-plugins-dir\") pod \"csi-hostpathplugin-wlmp9\" (UID: \"f31ac9d7-e1d6-413f-8749-4f3f13be75c9\") " pod="hostpath-provisioner/csi-hostpathplugin-wlmp9" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.894228 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/dd4dda6b-c6c4-4716-80b2-95ce65914125-config\") pod \"service-ca-operator-777779d784-btrqx\" (UID: \"dd4dda6b-c6c4-4716-80b2-95ce65914125\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-btrqx" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.894247 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" 
(UniqueName: \"kubernetes.io/configmap/bce1a18d-ad14-442b-afb3-78b1c7213379-config\") pod \"kube-apiserver-operator-766d6c64bb-tkvbn\" (UID: \"bce1a18d-ad14-442b-afb3-78b1c7213379\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-tkvbn" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.894568 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/601c8445-3908-4677-9df9-422f8e97d928-config-volume\") pod \"collect-profiles-29401920-pfkvn\" (UID: \"601c8445-3908-4677-9df9-422f8e97d928\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401920-pfkvn" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.899003 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-tls\" (UniqueName: \"kubernetes.io/secret/888a01e1-690c-41d1-bcc2-1f1403a205dd-metrics-tls\") pod \"dns-default-j5qr7\" (UID: \"888a01e1-690c-41d1-bcc2-1f1403a205dd\") " pod="openshift-dns/dns-default-j5qr7" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.899206 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-certs\" (UniqueName: \"kubernetes.io/secret/b988074b-ff7d-4921-b9a7-bda74319fcc9-webhook-certs\") pod \"multus-admission-controller-857f4d67dd-tfknb\" (UID: \"b988074b-ff7d-4921-b9a7-bda74319fcc9\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-tfknb" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.899457 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/dd4dda6b-c6c4-4716-80b2-95ce65914125-serving-cert\") pod \"service-ca-operator-777779d784-btrqx\" (UID: \"dd4dda6b-c6c4-4716-80b2-95ce65914125\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-btrqx" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.899795 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/bce1a18d-ad14-442b-afb3-78b1c7213379-serving-cert\") pod \"kube-apiserver-operator-766d6c64bb-tkvbn\" (UID: \"bce1a18d-ad14-442b-afb3-78b1c7213379\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-tkvbn" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.901706 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/601c8445-3908-4677-9df9-422f8e97d928-secret-volume\") pod \"collect-profiles-29401920-pfkvn\" (UID: \"601c8445-3908-4677-9df9-422f8e97d928\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401920-pfkvn" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.905121 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert\" (UniqueName: \"kubernetes.io/secret/6cdd823a-c12a-427c-8273-924e5295319c-cert\") pod \"ingress-canary-d5dcs\" (UID: \"6cdd823a-c12a-427c-8273-924e5295319c\") " pod="openshift-ingress-canary/ingress-canary-d5dcs" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.910940 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pc2vl\" (UniqueName: \"kubernetes.io/projected/2d2fae84-4466-4ef1-af30-b6df2e28f931-kube-api-access-pc2vl\") pod \"console-operator-58897d9998-w9htq\" (UID: \"2d2fae84-4466-4ef1-af30-b6df2e28f931\") " pod="openshift-console-operator/console-operator-58897d9998-w9htq" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.931122 4875 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/35ee6c04-3028-4b06-ac2b-6a26a2f2e147-bound-sa-token\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:29 crc kubenswrapper[4875]: W1126 00:09:29.943297 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf415abb0_de0a_477d_96c0_31efea9964e2.slice/crio-6207de4beb420df6190698cee137f44a92e08a07d95a38ebabda3d674c15e2be WatchSource:0}: Error finding container 6207de4beb420df6190698cee137f44a92e08a07d95a38ebabda3d674c15e2be: Status 404 returned error can't find the container with id 6207de4beb420df6190698cee137f44a92e08a07d95a38ebabda3d674c15e2be Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.956018 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rx4gr\" (UniqueName: \"kubernetes.io/projected/872d7004-c1ed-4411-bfcc-a50cfad48c96-kube-api-access-rx4gr\") pod \"cluster-samples-operator-665b6dd947-dgkwj\" (UID: \"872d7004-c1ed-4411-bfcc-a50cfad48c96\") " pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-dgkwj" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.966555 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-6ml5v" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.967662 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-t7rld\" (UniqueName: \"kubernetes.io/projected/9d929481-cfc4-4d91-b925-ac3a9127e007-kube-api-access-t7rld\") pod \"olm-operator-6b444d44fb-dbctw\" (UID: \"9d929481-cfc4-4d91-b925-ac3a9127e007\") " pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dbctw" Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.980713 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca/service-ca-9c57cc56f-pq492"] Nov 26 00:09:29 crc kubenswrapper[4875]: I1126 00:09:29.994162 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:29 crc kubenswrapper[4875]: E1126 00:09:29.994468 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:30.494439189 +0000 UTC m=+143.684414884 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.011740 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f22qb\" (UniqueName: \"kubernetes.io/projected/6cdd823a-c12a-427c-8273-924e5295319c-kube-api-access-f22qb\") pod \"ingress-canary-d5dcs\" (UID: \"6cdd823a-c12a-427c-8273-924e5295319c\") " pod="openshift-ingress-canary/ingress-canary-d5dcs" Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.024668 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-n8gzv\" (UniqueName: \"kubernetes.io/projected/601c8445-3908-4677-9df9-422f8e97d928-kube-api-access-n8gzv\") pod \"collect-profiles-29401920-pfkvn\" (UID: \"601c8445-3908-4677-9df9-422f8e97d928\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401920-pfkvn" Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.043572 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401920-pfkvn" Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.046840 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fqntg\" (UniqueName: \"kubernetes.io/projected/888a01e1-690c-41d1-bcc2-1f1403a205dd-kube-api-access-fqntg\") pod \"dns-default-j5qr7\" (UID: \"888a01e1-690c-41d1-bcc2-1f1403a205dd\") " pod="openshift-dns/dns-default-j5qr7" Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.065372 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/bce1a18d-ad14-442b-afb3-78b1c7213379-kube-api-access\") pod \"kube-apiserver-operator-766d6c64bb-tkvbn\" (UID: \"bce1a18d-ad14-442b-afb3-78b1c7213379\") " pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-tkvbn" Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.083824 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jw66c\" (UniqueName: \"kubernetes.io/projected/b988074b-ff7d-4921-b9a7-bda74319fcc9-kube-api-access-jw66c\") pod \"multus-admission-controller-857f4d67dd-tfknb\" (UID: \"b988074b-ff7d-4921-b9a7-bda74319fcc9\") " pod="openshift-multus/multus-admission-controller-857f4d67dd-tfknb" Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.085445 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication-operator/authentication-operator-69f744f599-55rwl"] Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.091156 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.095582 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:30 crc kubenswrapper[4875]: E1126 00:09:30.095874 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:30.595863229 +0000 UTC m=+143.785838924 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.097461 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-console-operator/console-operator-58897d9998-w9htq" Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.097869 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ingress-canary/ingress-canary-d5dcs" Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.103161 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-dns/dns-default-j5qr7" Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.106880 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-dgkwj" Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.109473 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vksp5\" (UniqueName: \"kubernetes.io/projected/dd4dda6b-c6c4-4716-80b2-95ce65914125-kube-api-access-vksp5\") pod \"service-ca-operator-777779d784-btrqx\" (UID: \"dd4dda6b-c6c4-4716-80b2-95ce65914125\") " pod="openshift-service-ca-operator/service-ca-operator-777779d784-btrqx" Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.143042 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-x9trx\" (UniqueName: \"kubernetes.io/projected/f31ac9d7-e1d6-413f-8749-4f3f13be75c9-kube-api-access-x9trx\") pod \"csi-hostpathplugin-wlmp9\" (UID: \"f31ac9d7-e1d6-413f-8749-4f3f13be75c9\") " pod="hostpath-provisioner/csi-hostpathplugin-wlmp9" Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.190056 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dbctw" Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.203258 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:30 crc kubenswrapper[4875]: E1126 00:09:30.203757 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:30.703741015 +0000 UTC m=+143.893716710 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.204742 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7dk2l" event={"ID":"28e618ae-43ac-4fc4-83c2-28462346e2a9","Type":"ContainerStarted","Data":"b1384d0f1421938dc5fa9911eb94af0cf8a94cfba432d34d3a4ab65725d1ef85"} Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.205486 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-pq492" event={"ID":"83b8aa62-fa25-4831-8fba-c08bb0887d14","Type":"ContainerStarted","Data":"67482656da93a422e6bcda7bcbb4f4c53a1d808204ca2d134cfcd8df03177127"} Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.207531 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-5mx4v" event={"ID":"3a569b99-f211-4204-a526-314e066143c2","Type":"ContainerStarted","Data":"9c6fac72b782acfe6337b98f2323fac052576983ca2fb54a59419f80702c3d1d"} Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.209114 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-879f6c89f-5mx4v" Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.215758 4875 generic.go:334] "Generic (PLEG): container finished" podID="a1910aeb-3a24-4922-ada0-a47d871c0998" containerID="a2171379f4106e778f7fa0e7ecebed2a169083583ff824d6d853543f4b76766a" exitCode=0 Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.215813 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-8fcp4" event={"ID":"a1910aeb-3a24-4922-ada0-a47d871c0998","Type":"ContainerDied","Data":"a2171379f4106e778f7fa0e7ecebed2a169083583ff824d6d853543f4b76766a"} Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.217911 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-nt2m7" event={"ID":"5e0f9c3c-fb3f-463b-a69c-57fc40cf2f36","Type":"ContainerStarted","Data":"a89afe4ef8bd20e10b060ce3880865097ecf16d1351b86483ade37a3cd270068"} Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.220401 4875 patch_prober.go:28] 
interesting pod/controller-manager-879f6c89f-5mx4v container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.17:8443/healthz\": dial tcp 10.217.0.17:8443: connect: connection refused" start-of-body= Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.220457 4875 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-5mx4v" podUID="3a569b99-f211-4204-a526-314e066143c2" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.17:8443/healthz\": dial tcp 10.217.0.17:8443: connect: connection refused" Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.222271 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-mgkmk" event={"ID":"f415abb0-de0a-477d-96c0-31efea9964e2","Type":"ContainerStarted","Data":"6207de4beb420df6190698cee137f44a92e08a07d95a38ebabda3d674c15e2be"} Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.222980 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xfnw9"] Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.225553 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/catalog-operator-68c6474976-62q8r"] Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.226674 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-bplfw" event={"ID":"b226d68e-dbae-401a-88d4-ccc0cb1a51cf","Type":"ContainerStarted","Data":"42b822b8bdc524844685385a0bc5e1829b394d780910f4a14a57c7de2752ebf7"} Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.229462 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress/router-default-5444994796-bplfw" event={"ID":"b226d68e-dbae-401a-88d4-ccc0cb1a51cf","Type":"ContainerStarted","Data":"dfbba5a82bdbb0f24466997f2bf6bb49361860cb0baf24192baedc55e71b098e"} Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.243522 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console/downloads-7954f5f757-7s69q"] Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.246667 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-pkmj5" event={"ID":"d2c99872-a302-4599-96b7-2320051f069c","Type":"ContainerStarted","Data":"fde1701f6155a1fc881ef3360c6a22858dc1a72c88c1cd31f06b20aac20e6e0e"} Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.246705 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/machine-api-operator-5694c8668f-pkmj5" event={"ID":"d2c99872-a302-4599-96b7-2320051f069c","Type":"ContainerStarted","Data":"a99d62ef0c21a545f2efd987385b38c0ef572a13f304f58573f103845d89da1c"} Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.248668 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-9r68s" event={"ID":"715ac3a3-bef6-4122-a4b0-3ea80be8a7eb","Type":"ContainerStarted","Data":"e9a821543376394f8d465bb9dff00e91c635e908c6086a38736b607ca4c2b298"} Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.249733 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fgpzs" 
event={"ID":"c05d440a-597d-4b70-9408-2b3143be393d","Type":"ContainerStarted","Data":"5d6f4414fc9805a898bfe66f2364bf743e19b980f345f3da47ddd1e7dda57297"} Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.250817 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t2hxd" event={"ID":"0c1975b8-52f8-4b58-a5f3-cc74156dcd75","Type":"ContainerStarted","Data":"da050fff7f69d82dda1bfbf38a87d2a89d14deaa528227c31d98194063e66cc8"} Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.251795 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bbsj7" event={"ID":"1167d13e-be01-4849-a6e8-0b5f505a0caa","Type":"ContainerStarted","Data":"bd64b611136061de21060b86538c41e7bfe0725a420ee75d66cd2059a1ea0254"} Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.254730 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rlkpg" event={"ID":"42c80491-a1f7-4f1f-b2a0-db682fbc490e","Type":"ContainerStarted","Data":"f6a814d81327c73ae505c66b9ed8fc9fd159d7d798f172847c1c6f962620aa89"} Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.255744 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rlkpg" Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.256589 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gw6b8" event={"ID":"1eeb4a6d-f2da-4006-aa3a-37f3adb4937b","Type":"ContainerStarted","Data":"d05e304b287aa6fec52d9f6bfbe08d01b63c6addb333dd5cf34fbe885c81ace7"} Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.259042 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-pruner-29401920-nz8v2" event={"ID":"55a68700-c0f7-4ae5-a252-013c8fbab545","Type":"ContainerStarted","Data":"f4fc1d6dc90bfe843e93fdbaf3ff36106df6a4b963d9bedb8eef7c7436d2109b"} Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.260781 4875 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-rlkpg container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body= Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.260812 4875 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rlkpg" podUID="42c80491-a1f7-4f1f-b2a0-db682fbc490e" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.261498 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-l4gfb" event={"ID":"4349f16a-1b36-45b5-aa93-f982bcaa781f","Type":"ContainerStarted","Data":"d01f91ca00394d9d51cf1fc41c01f297a68e5d1732266504daa31a4cbbb71dc9"} Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.261543 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/console-f9d7485db-l4gfb" 
event={"ID":"4349f16a-1b36-45b5-aa93-f982bcaa781f","Type":"ContainerStarted","Data":"7f3b7e8f589c50366a6ebe3c430769495a62c3a2cadd524abea9a8fd34dfc0f4"} Nov 26 00:09:30 crc kubenswrapper[4875]: W1126 00:09:30.295976 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd04a012b_fcc1_4f75_8e0a_4a4a17d9cf75.slice/crio-3304e437db4cf1c15482703364c4e03bc6ab784a9b3fac6a58a00e59ef94ddf0 WatchSource:0}: Error finding container 3304e437db4cf1c15482703364c4e03bc6ab784a9b3fac6a58a00e59ef94ddf0: Status 404 returned error can't find the container with id 3304e437db4cf1c15482703364c4e03bc6ab784a9b3fac6a58a00e59ef94ddf0 Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.305014 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:30 crc kubenswrapper[4875]: E1126 00:09:30.307178 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:30.807161616 +0000 UTC m=+143.997137381 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:30 crc kubenswrapper[4875]: W1126 00:09:30.311779 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podd752aecd_9a39_430c_9262_c81434b3f08a.slice/crio-ae5615b3e2c68eaf9be82b723e1c95006669f86b4844069e8c7b864744cbf97d WatchSource:0}: Error finding container ae5615b3e2c68eaf9be82b723e1c95006669f86b4844069e8c7b864744cbf97d: Status 404 returned error can't find the container with id ae5615b3e2c68eaf9be82b723e1c95006669f86b4844069e8c7b864744cbf97d Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.333575 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-service-ca-operator/service-ca-operator-777779d784-btrqx" Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.344090 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-operator/ingress-operator-5b745b69d9-gd8c4"] Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.347350 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-8kdf7"] Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.349582 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-rln4j"] Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.352535 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-tkvbn" Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.354355 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-54wbh"] Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.359946 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/multus-admission-controller-857f4d67dd-tfknb" Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.381379 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="hostpath-provisioner/csi-hostpathplugin-wlmp9" Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.405077 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns-operator/dns-operator-744455d44c-8t466"] Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.407285 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:30 crc kubenswrapper[4875]: E1126 00:09:30.407340 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:30.907326433 +0000 UTC m=+144.097302128 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.410731 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.411253 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-oauth-apiserver/apiserver-7bbb656c7d-l4vms"] Nov 26 00:09:30 crc kubenswrapper[4875]: E1126 00:09:30.414049 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:30.914035055 +0000 UTC m=+144.104010750 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.456186 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-storage-version-migrator/migrator-59844c95c7-qxqtr"] Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.477948 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-controller-84d6567774-mpgjt"] Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.535837 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:30 crc kubenswrapper[4875]: E1126 00:09:30.536029 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:31.036008774 +0000 UTC m=+144.225984469 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.536142 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:30 crc kubenswrapper[4875]: E1126 00:09:30.536389 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:31.036381423 +0000 UTC m=+144.226357118 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.557082 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-machine-config-operator/machine-config-operator-74547568cd-bsvcq"] Nov 26 00:09:30 crc kubenswrapper[4875]: W1126 00:09:30.559339 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2f69fc9c_de93_4cae_a491_50698ac10599.slice/crio-3eb2ce2f5e5492b05486f0f8eee8e19b01f850ca3ea95b50850906122f2d13d0 WatchSource:0}: Error finding container 3eb2ce2f5e5492b05486f0f8eee8e19b01f850ca3ea95b50850906122f2d13d0: Status 404 returned error can't find the container with id 3eb2ce2f5e5492b05486f0f8eee8e19b01f850ca3ea95b50850906122f2d13d0 Nov 26 00:09:30 crc kubenswrapper[4875]: W1126 00:09:30.563106 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod1da5021c_2176_41cb_9c3c_d4ccbe299664.slice/crio-6c958a4f4a776bbfee8274d25d1b7810665a3aa5a39fff5138ff80ece67a37c6 WatchSource:0}: Error finding container 6c958a4f4a776bbfee8274d25d1b7810665a3aa5a39fff5138ff80ece67a37c6: Status 404 returned error can't find the container with id 6c958a4f4a776bbfee8274d25d1b7810665a3aa5a39fff5138ff80ece67a37c6 Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.651283 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:30 crc kubenswrapper[4875]: E1126 00:09:30.651550 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:31.151522976 +0000 UTC m=+144.341498661 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.651699 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:30 crc kubenswrapper[4875]: E1126 00:09:30.651997 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:31.151987098 +0000 UTC m=+144.341962793 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.689964 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-v9znd"] Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.752267 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:30 crc kubenswrapper[4875]: E1126 00:09:30.752633 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:31.252608407 +0000 UTC m=+144.442584102 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.761326 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:30 crc kubenswrapper[4875]: E1126 00:09:30.761727 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:31.261713111 +0000 UTC m=+144.451688806 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.822162 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-config-operator/openshift-config-operator-7777fb866f-zww4t"] Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.832179 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-6ml5v"] Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.863705 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:30 crc kubenswrapper[4875]: E1126 00:09:30.863982 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:31.363944012 +0000 UTC m=+144.553919717 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.864114 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:30 crc kubenswrapper[4875]: E1126 00:09:30.864976 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:31.364963167 +0000 UTC m=+144.554938862 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:30 crc kubenswrapper[4875]: I1126 00:09:30.976784 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:30 crc kubenswrapper[4875]: E1126 00:09:30.977106 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:31.477079562 +0000 UTC m=+144.667055317 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.065607 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-tkvbn"] Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.078914 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:31 crc kubenswrapper[4875]: E1126 00:09:31.079306 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:31.579287182 +0000 UTC m=+144.769262947 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.168670 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-dns/dns-default-j5qr7"] Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.174739 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401920-pfkvn"] Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.179669 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:31 crc kubenswrapper[4875]: E1126 00:09:31.180046 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:31.680026994 +0000 UTC m=+144.870002689 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.222772 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-ingress/router-default-5444994796-bplfw" Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.264575 4875 patch_prober.go:28] interesting pod/router-default-5444994796-bplfw container/router namespace/openshift-ingress: Startup probe status=failure output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" start-of-body= Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.264734 4875 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bplfw" podUID="b226d68e-dbae-401a-88d4-ccc0cb1a51cf" containerName="router" probeResult="failure" output="Get \"http://localhost:1936/healthz/ready\": dial tcp [::1]:1936: connect: connection refused" Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.283808 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:31 crc kubenswrapper[4875]: E1126 00:09:31.284489 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:31.784476432 +0000 UTC m=+144.974452127 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.294076 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-55rwl" event={"ID":"8f3c50f0-6fff-4f1c-a0b6-f6756823da65","Type":"ContainerStarted","Data":"05d30429ff85388d975d6374c62f02e9e01b0433ddad329148509ee137703348"} Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.295128 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-8kdf7" event={"ID":"24b77217-7ebc-4165-a7b5-d7171adbc29a","Type":"ContainerStarted","Data":"b526823be400f7f7020ae7cc83e2116221aa359da2576a43bcc4eba9c1a454e1"} Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.296596 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-7s69q" event={"ID":"b118a730-fb77-4632-83f3-9b94b4d52b2c","Type":"ContainerStarted","Data":"bfc8c9ab6959b7c2ba0a8e3e6b22e7bb0946741f4695f2a798077b3103dadbcb"} Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.301535 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-etcd-operator/etcd-operator-b45778765-9r68s" event={"ID":"715ac3a3-bef6-4122-a4b0-3ea80be8a7eb","Type":"ContainerStarted","Data":"75f8a15f218c1c0329172fcb2d8148e15235adbb0b83a5779d6d0d3b0b9052e4"} Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.303013 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-mgkmk" event={"ID":"f415abb0-de0a-477d-96c0-31efea9964e2","Type":"ContainerStarted","Data":"14d2b736a420e4c41f4bfb2ae5bf59641164b7da6a8c52d4b4e6ffb9200a0309"} Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.305272 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t2hxd" event={"ID":"0c1975b8-52f8-4b58-a5f3-cc74156dcd75","Type":"ContainerStarted","Data":"4df71c809204b31f9ef3283a4c9c0d8855babc9c5d1228630a51a4b2c4d6ad91"} Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.305510 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t2hxd" Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.307923 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7dk2l" event={"ID":"28e618ae-43ac-4fc4-83c2-28462346e2a9","Type":"ContainerStarted","Data":"4bb1e36be5e0307e1608d5ea29b6d5332bb36fbe244f2df3da990023124fff9b"} Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.311799 4875 patch_prober.go:28] interesting pod/packageserver-d55dfcdfc-t2hxd container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.38:5443/healthz\": dial tcp 10.217.0.38:5443: connect: connection refused" start-of-body= Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.312795 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bbsj7" event={"ID":"1167d13e-be01-4849-a6e8-0b5f505a0caa","Type":"ContainerStarted","Data":"2f03c849e63d9f1d4534e7b131df2d89165cb1d5b0b48bdfa86eddb36833bfe6"} Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.312945 4875 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t2hxd" podUID="0c1975b8-52f8-4b58-a5f3-cc74156dcd75" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.38:5443/healthz\": dial tcp 10.217.0.38:5443: connect: connection refused" Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.318090 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gw6b8" event={"ID":"1eeb4a6d-f2da-4006-aa3a-37f3adb4937b","Type":"ContainerStarted","Data":"ce0fcee4a1f138c2463b64b6e455a8273de4997f8e414be48daa5b6641a56566"} Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.322817 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-l4vms" event={"ID":"2f69fc9c-de93-4cae-a491-50698ac10599","Type":"ContainerStarted","Data":"3eb2ce2f5e5492b05486f0f8eee8e19b01f850ca3ea95b50850906122f2d13d0"} Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.324385 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-zww4t" event={"ID":"49bb8ae9-ab85-458a-850e-8d3b48ab42de","Type":"ContainerStarted","Data":"86a89a1a54a41d281d294efc5c5d42a66e418c03414e1d01d624228fa28ac9a3"} Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.326160 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-server-nt2m7" event={"ID":"5e0f9c3c-fb3f-463b-a69c-57fc40cf2f36","Type":"ContainerStarted","Data":"eca486a0dd82f7c811d137b51ddf60557015bc4bef822f86b696fe04ef2393ca"} Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.328716 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-54wbh" event={"ID":"0dc6d7a2-494d-462d-8972-de55d1e36f31","Type":"ContainerStarted","Data":"f3569774bc1529629e7c5530e8d87c83081c6639b71a07c2a2f5272a3050ad56"} Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.330185 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-62q8r" event={"ID":"d04a012b-fcc1-4f75-8e0a-4a4a17d9cf75","Type":"ContainerStarted","Data":"3304e437db4cf1c15482703364c4e03bc6ab784a9b3fac6a58a00e59ef94ddf0"} Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.332251 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-rln4j" event={"ID":"986802f7-9a9e-4191-8545-0ccda0c2bcf5","Type":"ContainerStarted","Data":"8a09c7535dabe007b21e90a7cc7fcd851a640ccd5c85ac3fcd8b89bbb6c3403a"} Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.334461 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca/service-ca-9c57cc56f-pq492" event={"ID":"83b8aa62-fa25-4831-8fba-c08bb0887d14","Type":"ContainerStarted","Data":"d2a36408f9d20737cf1575ef72a10860a28491393f0c43314f0144006ab362da"} Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.335449 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-qxqtr" event={"ID":"1da5021c-2176-41cb-9c3c-d4ccbe299664","Type":"ContainerStarted","Data":"6c958a4f4a776bbfee8274d25d1b7810665a3aa5a39fff5138ff80ece67a37c6"} Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.336896 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xfnw9" event={"ID":"d752aecd-9a39-430c-9262-c81434b3f08a","Type":"ContainerStarted","Data":"ae5615b3e2c68eaf9be82b723e1c95006669f86b4844069e8c7b864744cbf97d"} Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.338166 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-8t466" event={"ID":"84a8202a-8464-4308-8ecb-dfb811933d5b","Type":"ContainerStarted","Data":"d373fec92c8022389cfccefc3c1c540375ff4d20b595d65f9b3fe4431b623352"} Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.339599 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-gd8c4" event={"ID":"baf3ba36-cf4a-4780-b954-913da501aab7","Type":"ContainerStarted","Data":"264c1fa46c4f85e48cdb049b0cb4c1021a2211bcd25380dc2405f4858ace4954"} Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.341980 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" event={"ID":"0a3941af-01d7-44c0-8e51-cc719e7f4835","Type":"ContainerStarted","Data":"0bb75f288c5837dc214b487a88e223004e96f1ca1a50b783a71a6d9448f5c75d"} Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.346769 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-6ml5v" event={"ID":"d725db0e-68dc-4655-ae0f-8e847ef651c3","Type":"ContainerStarted","Data":"f626bdeb5ea4d83aa18d9986371b58f7182fe60152c309ed0bf2d3d9f5dbaf65"} Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.348563 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mpgjt" event={"ID":"b0ac0929-e6e0-464c-bb52-aadcea8d2f3e","Type":"ContainerStarted","Data":"d0f99bdcfa64ea8d5c2d2bfcb8b24522c6ab2499abd26c65bf816403011abfca"} Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.350252 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fgpzs" event={"ID":"c05d440a-597d-4b70-9408-2b3143be393d","Type":"ContainerStarted","Data":"f2652f6f2f8d716bfde791ef96f224cbb77d4dfd547ea905342b63e576dbfb07"} Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.353370 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-bsvcq" event={"ID":"85ee0261-b699-4159-9ee7-123628b1f1c0","Type":"ContainerStarted","Data":"219eb0384686302c660668754a998c219743973063be1cc0a4cfca97883a7bc4"} Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.354227 4875 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-5mx4v container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.17:8443/healthz\": dial tcp 10.217.0.17:8443: connect: connection refused" start-of-body= Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.354271 4875 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-5mx4v" 
podUID="3a569b99-f211-4204-a526-314e066143c2" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.17:8443/healthz\": dial tcp 10.217.0.17:8443: connect: connection refused" Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.356462 4875 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-rlkpg container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body= Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.356633 4875 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rlkpg" podUID="42c80491-a1f7-4f1f-b2a0-db682fbc490e" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.384887 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:31 crc kubenswrapper[4875]: E1126 00:09:31.387094 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:31.887072312 +0000 UTC m=+145.077048007 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.467294 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-console-operator/console-operator-58897d9998-w9htq"] Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.471226 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/multus-admission-controller-857f4d67dd-tfknb"] Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.486711 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-dgkwj"] Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.487491 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:31 crc kubenswrapper[4875]: E1126 00:09:31.487925 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-26 00:09:31.987913396 +0000 UTC m=+145.177889091 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.515085 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["hostpath-provisioner/csi-hostpathplugin-wlmp9"] Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.520086 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-service-ca-operator/service-ca-operator-777779d784-btrqx"] Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.588202 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:31 crc kubenswrapper[4875]: E1126 00:09:31.588616 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:32.088597216 +0000 UTC m=+145.278572911 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.596100 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rlkpg" podStartSLOduration=123.596083689 podStartE2EDuration="2m3.596083689s" podCreationTimestamp="2025-11-26 00:07:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:09:31.594804196 +0000 UTC m=+144.784779891" watchObservedRunningTime="2025-11-26 00:09:31.596083689 +0000 UTC m=+144.786059384" Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.656130 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dbctw"] Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.666640 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-ingress-canary/ingress-canary-d5dcs"] Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.686619 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-server-nt2m7" podStartSLOduration=5.6865934970000005 podStartE2EDuration="5.686593497s" podCreationTimestamp="2025-11-26 00:09:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" 
lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:09:31.682534723 +0000 UTC m=+144.872510428" watchObservedRunningTime="2025-11-26 00:09:31.686593497 +0000 UTC m=+144.876569192" Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.689843 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:31 crc kubenswrapper[4875]: E1126 00:09:31.691754 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:32.19173618 +0000 UTC m=+145.381711875 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:31 crc kubenswrapper[4875]: W1126 00:09:31.717383 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod2d2fae84_4466_4ef1_af30_b6df2e28f931.slice/crio-57ce9d440b695a5be11e541bacf5cf1734bebb5d12b74161b7be4f0fb605ef93 WatchSource:0}: Error finding container 57ce9d440b695a5be11e541bacf5cf1734bebb5d12b74161b7be4f0fb605ef93: Status 404 returned error can't find the container with id 57ce9d440b695a5be11e541bacf5cf1734bebb5d12b74161b7be4f0fb605ef93 Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.722184 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager-operator/openshift-controller-manager-operator-756b6f6bc6-bbsj7" podStartSLOduration=123.722161443 podStartE2EDuration="2m3.722161443s" podCreationTimestamp="2025-11-26 00:07:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:09:31.721338852 +0000 UTC m=+144.911314547" watchObservedRunningTime="2025-11-26 00:09:31.722161443 +0000 UTC m=+144.912137138" Nov 26 00:09:31 crc kubenswrapper[4875]: W1126 00:09:31.742238 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podb988074b_ff7d_4921_b9a7_bda74319fcc9.slice/crio-50138c43fca738a2114e6da42fb2f84156ab698e1db0761870ee24912985d32f WatchSource:0}: Error finding container 50138c43fca738a2114e6da42fb2f84156ab698e1db0761870ee24912985d32f: Status 404 returned error can't find the container with id 50138c43fca738a2114e6da42fb2f84156ab698e1db0761870ee24912985d32f Nov 26 00:09:31 crc kubenswrapper[4875]: W1126 00:09:31.743356 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-podf31ac9d7_e1d6_413f_8749_4f3f13be75c9.slice/crio-8e838b9b27bfa9531194b47657041d57e14d44c480cba5bbb369bcfeacf4d0a7 WatchSource:0}: Error finding container 
8e838b9b27bfa9531194b47657041d57e14d44c480cba5bbb369bcfeacf4d0a7: Status 404 returned error can't find the container with id 8e838b9b27bfa9531194b47657041d57e14d44c480cba5bbb369bcfeacf4d0a7 Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.762400 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t2hxd" podStartSLOduration=123.762379207 podStartE2EDuration="2m3.762379207s" podCreationTimestamp="2025-11-26 00:07:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:09:31.759930485 +0000 UTC m=+144.949906180" watchObservedRunningTime="2025-11-26 00:09:31.762379207 +0000 UTC m=+144.952354902" Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.790710 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:31 crc kubenswrapper[4875]: E1126 00:09:31.797320 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:32.297266295 +0000 UTC m=+145.487241990 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.806372 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-etcd-operator/etcd-operator-b45778765-9r68s" podStartSLOduration=123.806354249 podStartE2EDuration="2m3.806354249s" podCreationTimestamp="2025-11-26 00:07:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:09:31.804132342 +0000 UTC m=+144.994108057" watchObservedRunningTime="2025-11-26 00:09:31.806354249 +0000 UTC m=+144.996329944" Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.884086 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-pruner-29401920-nz8v2" podStartSLOduration=125.884024548 podStartE2EDuration="2m5.884024548s" podCreationTimestamp="2025-11-26 00:07:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:09:31.841721379 +0000 UTC m=+145.031697084" watchObservedRunningTime="2025-11-26 00:09:31.884024548 +0000 UTC m=+145.074000243" Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.886911 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress/router-default-5444994796-bplfw" podStartSLOduration=123.886885601 podStartE2EDuration="2m3.886885601s" podCreationTimestamp="2025-11-26 00:07:28 +0000 UTC" firstStartedPulling="0001-01-01 
00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:09:31.883889884 +0000 UTC m=+145.073865589" watchObservedRunningTime="2025-11-26 00:09:31.886885601 +0000 UTC m=+145.076861286" Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.892717 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:31 crc kubenswrapper[4875]: E1126 00:09:31.893074 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:32.39306035 +0000 UTC m=+145.583036045 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.927211 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/console-f9d7485db-l4gfb" podStartSLOduration=123.927188848 podStartE2EDuration="2m3.927188848s" podCreationTimestamp="2025-11-26 00:07:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:09:31.92529823 +0000 UTC m=+145.115273955" watchObservedRunningTime="2025-11-26 00:09:31.927188848 +0000 UTC m=+145.117164543" Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.990449 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca/service-ca-9c57cc56f-pq492" podStartSLOduration=123.990428955 podStartE2EDuration="2m3.990428955s" podCreationTimestamp="2025-11-26 00:07:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:09:31.988985688 +0000 UTC m=+145.178961393" watchObservedRunningTime="2025-11-26 00:09:31.990428955 +0000 UTC m=+145.180404650" Nov 26 00:09:31 crc kubenswrapper[4875]: I1126 00:09:31.994751 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:31 crc kubenswrapper[4875]: E1126 00:09:31.995388 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:32.495365593 +0000 UTC m=+145.685341288 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.009920 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver-operator/openshift-apiserver-operator-796bbdcf4f-mgkmk" podStartSLOduration=125.009898836 podStartE2EDuration="2m5.009898836s" podCreationTimestamp="2025-11-26 00:07:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:09:32.007159956 +0000 UTC m=+145.197135651" watchObservedRunningTime="2025-11-26 00:09:32.009898836 +0000 UTC m=+145.199874521" Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.050119 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/machine-api-operator-5694c8668f-pkmj5" podStartSLOduration=124.05009771 podStartE2EDuration="2m4.05009771s" podCreationTimestamp="2025-11-26 00:07:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:09:32.045081341 +0000 UTC m=+145.235057036" watchObservedRunningTime="2025-11-26 00:09:32.05009771 +0000 UTC m=+145.240073405" Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.084285 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-879f6c89f-5mx4v" podStartSLOduration=124.084269949 podStartE2EDuration="2m4.084269949s" podCreationTimestamp="2025-11-26 00:07:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:09:32.082869314 +0000 UTC m=+145.272845009" watchObservedRunningTime="2025-11-26 00:09:32.084269949 +0000 UTC m=+145.274245644" Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.097522 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:32 crc kubenswrapper[4875]: E1126 00:09:32.098026 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:32.598005913 +0000 UTC m=+145.787981608 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.123156 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager-operator/kube-controller-manager-operator-78b949d7b-gw6b8" podStartSLOduration=124.12314086 podStartE2EDuration="2m4.12314086s" podCreationTimestamp="2025-11-26 00:07:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:09:32.121760685 +0000 UTC m=+145.311736380" watchObservedRunningTime="2025-11-26 00:09:32.12314086 +0000 UTC m=+145.313116555" Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.198480 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:32 crc kubenswrapper[4875]: E1126 00:09:32.198688 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:32.698626412 +0000 UTC m=+145.888602107 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.198942 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:32 crc kubenswrapper[4875]: E1126 00:09:32.199262 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:32.699255088 +0000 UTC m=+145.889230783 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.267556 4875 patch_prober.go:28] interesting pod/router-default-5444994796-bplfw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 00:09:32 crc kubenswrapper[4875]: [-]has-synced failed: reason withheld Nov 26 00:09:32 crc kubenswrapper[4875]: [+]process-running ok Nov 26 00:09:32 crc kubenswrapper[4875]: healthz check failed Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.267601 4875 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bplfw" podUID="b226d68e-dbae-401a-88d4-ccc0cb1a51cf" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.299755 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:32 crc kubenswrapper[4875]: E1126 00:09:32.300164 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:32.800124783 +0000 UTC m=+145.990100478 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.364798 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7dk2l" event={"ID":"28e618ae-43ac-4fc4-83c2-28462346e2a9","Type":"ContainerStarted","Data":"814f0234bffb149484fef57fabe9988869ee0e0895babc90b91a4fd0a6444f1c"} Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.367174 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-dgkwj" event={"ID":"872d7004-c1ed-4411-bfcc-a50cfad48c96","Type":"ContainerStarted","Data":"fb9907d5c574230622ea45e36520794be782c09ecbb129adc1a6e49c7a4f50f7"} Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.368268 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-w9htq" event={"ID":"2d2fae84-4466-4ef1-af30-b6df2e28f931","Type":"ContainerStarted","Data":"57ce9d440b695a5be11e541bacf5cf1734bebb5d12b74161b7be4f0fb605ef93"} Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.370805 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-j5qr7" event={"ID":"888a01e1-690c-41d1-bcc2-1f1403a205dd","Type":"ContainerStarted","Data":"7ab958c8b733b4244cced3b824167f8d4ba990c430691a5df6df35844c1a3ef3"} Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.372496 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-wlmp9" event={"ID":"f31ac9d7-e1d6-413f-8749-4f3f13be75c9","Type":"ContainerStarted","Data":"8e838b9b27bfa9531194b47657041d57e14d44c480cba5bbb369bcfeacf4d0a7"} Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.373870 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication-operator/authentication-operator-69f744f599-55rwl" event={"ID":"8f3c50f0-6fff-4f1c-a0b6-f6756823da65","Type":"ContainerStarted","Data":"ea283984c9dcf0eff1fd1ac543b1b4e655c3008a6d98e3d56c5a1c74dea1d06b"} Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.374883 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xfnw9" event={"ID":"d752aecd-9a39-430c-9262-c81434b3f08a","Type":"ContainerStarted","Data":"0a19299311e3161f466a27fe92b7ca17a798ad5a7c1b1d9c567e86a552b931b0"} Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.376475 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-btrqx" event={"ID":"dd4dda6b-c6c4-4716-80b2-95ce65914125","Type":"ContainerStarted","Data":"180ce7510c5783c3b23620dfe401a3a258ab1a42ca0dc4cdaeee4b50f3b21cf9"} Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.379088 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fgpzs" event={"ID":"c05d440a-597d-4b70-9408-2b3143be393d","Type":"ContainerStarted","Data":"e36973601818a8c88cff37d267b4e4f53d87a1fc3ffdc18349793e6080781dd2"} Nov 26 
00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.379983 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mpgjt" event={"ID":"b0ac0929-e6e0-464c-bb52-aadcea8d2f3e","Type":"ContainerStarted","Data":"a877faa5803676f3a13b52b045321b35e0c3882ba7526a954f86efe7c4ac2a86"} Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.383487 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-8fcp4" event={"ID":"a1910aeb-3a24-4922-ada0-a47d871c0998","Type":"ContainerStarted","Data":"ea93e68a1631151161b9fceae19ab9e10c334f8c32b20a877d171fc9fa1fb5b8"} Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.385067 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-gd8c4" event={"ID":"baf3ba36-cf4a-4780-b954-913da501aab7","Type":"ContainerStarted","Data":"e4b4027bddfaecc3a623bded6176c2da99bb0c7db7b76b5e765358c11f2fce18"} Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.386825 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-8t466" event={"ID":"84a8202a-8464-4308-8ecb-dfb811933d5b","Type":"ContainerStarted","Data":"58bdf99e7e323fc5c889ad50d24535b695bba69a30070f1e802cb98beed08cf8"} Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.401493 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.401874 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-62q8r" event={"ID":"d04a012b-fcc1-4f75-8e0a-4a4a17d9cf75","Type":"ContainerStarted","Data":"3d34f01b0a531414b18b4df5271df3ec4bee5d3668f7069fd2c0bdf3e5fdd5b4"} Nov 26 00:09:32 crc kubenswrapper[4875]: E1126 00:09:32.402021 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:32.901998145 +0000 UTC m=+146.091973830 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.403280 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-rln4j" event={"ID":"986802f7-9a9e-4191-8545-0ccda0c2bcf5","Type":"ContainerStarted","Data":"99334a8d1b239e19a8bab8a92db4c98e824dfa15099983f7df275ec77d56d76d"} Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.407003 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-54wbh" event={"ID":"0dc6d7a2-494d-462d-8972-de55d1e36f31","Type":"ContainerStarted","Data":"e3185d18089c7c01b7ffb19acbd7184d7324ff1f153d3e21891140f1e6905c23"} Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.408916 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-8kdf7" event={"ID":"24b77217-7ebc-4165-a7b5-d7171adbc29a","Type":"ContainerStarted","Data":"5005afcddbad2d32560aecda06ced30fdfdca8848fcb59e26f19129741c5b42a"} Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.410153 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dbctw" event={"ID":"9d929481-cfc4-4d91-b925-ac3a9127e007","Type":"ContainerStarted","Data":"0ec4ea102adbe350275fdc7850b4aa2eabfe37a767f22c40f23b29487c43a41c"} Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.411682 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-bsvcq" event={"ID":"85ee0261-b699-4159-9ee7-123628b1f1c0","Type":"ContainerStarted","Data":"664929cf4bd5a3f03a17f3b3d422584eb61a724c2ea28a345d556d92cf850f92"} Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.412803 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-l4vms" event={"ID":"2f69fc9c-de93-4cae-a491-50698ac10599","Type":"ContainerStarted","Data":"d9979ab2dd6f47c46ce980bada1ba49d99035618f10c3cf3b063ee71d321fe72"} Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.413712 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-qxqtr" event={"ID":"1da5021c-2176-41cb-9c3c-d4ccbe299664","Type":"ContainerStarted","Data":"7f4904414b6aeae8246914de5b2e17ca2ff61478085393ab34cbb37b35a0c377"} Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.414465 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401920-pfkvn" event={"ID":"601c8445-3908-4677-9df9-422f8e97d928","Type":"ContainerStarted","Data":"fab8ed9cb967a8c3e469bd34e98ed5170acf90287640122afef9505b4bddb2e8"} Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.415453 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-7s69q" event={"ID":"b118a730-fb77-4632-83f3-9b94b4d52b2c","Type":"ContainerStarted","Data":"380a17ba4f04785116c8889678205926dc189280d0ba8398772598d38d524232"} Nov 26 
00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.415653 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-7s69q" Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.416284 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-tfknb" event={"ID":"b988074b-ff7d-4921-b9a7-bda74319fcc9","Type":"ContainerStarted","Data":"50138c43fca738a2114e6da42fb2f84156ab698e1db0761870ee24912985d32f"} Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.417219 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-tkvbn" event={"ID":"bce1a18d-ad14-442b-afb3-78b1c7213379","Type":"ContainerStarted","Data":"2540a600f004d5d711d0138c8f5d324d6abd08ac4bd549e9672c87727f68786a"} Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.417969 4875 patch_prober.go:28] interesting pod/downloads-7954f5f757-7s69q container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.418012 4875 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7s69q" podUID="b118a730-fb77-4632-83f3-9b94b4d52b2c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.418391 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-d5dcs" event={"ID":"6cdd823a-c12a-427c-8273-924e5295319c","Type":"ContainerStarted","Data":"e217cbafb94420fbb355ee52b360ba971233baad9a029715c8cd5b971f8c144a"} Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.418976 4875 patch_prober.go:28] interesting pod/controller-manager-879f6c89f-5mx4v container/controller-manager namespace/openshift-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.17:8443/healthz\": dial tcp 10.217.0.17:8443: connect: connection refused" start-of-body= Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.419004 4875 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-controller-manager/controller-manager-879f6c89f-5mx4v" podUID="3a569b99-f211-4204-a526-314e066143c2" containerName="controller-manager" probeResult="failure" output="Get \"https://10.217.0.17:8443/healthz\": dial tcp 10.217.0.17:8443: connect: connection refused" Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.419006 4875 patch_prober.go:28] interesting pod/route-controller-manager-6576b87f9c-rlkpg container/route-controller-manager namespace/openshift-route-controller-manager: Readiness probe status=failure output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" start-of-body= Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.419044 4875 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rlkpg" podUID="42c80491-a1f7-4f1f-b2a0-db682fbc490e" containerName="route-controller-manager" probeResult="failure" output="Get \"https://10.217.0.6:8443/healthz\": dial tcp 10.217.0.6:8443: connect: connection refused" Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.420476 4875 patch_prober.go:28] interesting 
pod/packageserver-d55dfcdfc-t2hxd container/packageserver namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.38:5443/healthz\": dial tcp 10.217.0.38:5443: connect: connection refused" start-of-body= Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.420535 4875 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t2hxd" podUID="0c1975b8-52f8-4b58-a5f3-cc74156dcd75" containerName="packageserver" probeResult="failure" output="Get \"https://10.217.0.38:5443/healthz\": dial tcp 10.217.0.38:5443: connect: connection refused" Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.507319 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:32 crc kubenswrapper[4875]: E1126 00:09:32.507697 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:33.007670944 +0000 UTC m=+146.197646659 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.509830 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:32 crc kubenswrapper[4875]: E1126 00:09:32.515636 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:33.015609287 +0000 UTC m=+146.205584982 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.610553 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:32 crc kubenswrapper[4875]: E1126 00:09:32.610782 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:33.110739335 +0000 UTC m=+146.300715030 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.611168 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:32 crc kubenswrapper[4875]: E1126 00:09:32.611731 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:33.111712121 +0000 UTC m=+146.301687816 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.682434 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console/downloads-7954f5f757-7s69q" podStartSLOduration=124.682391819 podStartE2EDuration="2m4.682391819s" podCreationTimestamp="2025-11-26 00:07:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:09:32.677717879 +0000 UTC m=+145.867693574" watchObservedRunningTime="2025-11-26 00:09:32.682391819 +0000 UTC m=+145.872367514" Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.713180 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:32 crc kubenswrapper[4875]: E1126 00:09:32.713327 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:33.213307455 +0000 UTC m=+146.403283150 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.713540 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:32 crc kubenswrapper[4875]: E1126 00:09:32.713853 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:33.213845788 +0000 UTC m=+146.403821483 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.814874 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:32 crc kubenswrapper[4875]: E1126 00:09:32.815022 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:33.314991681 +0000 UTC m=+146.504967376 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.817845 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:32 crc kubenswrapper[4875]: E1126 00:09:32.818264 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:33.318243614 +0000 UTC m=+146.508219309 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.919654 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:32 crc kubenswrapper[4875]: E1126 00:09:32.920379 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:33.42029078 +0000 UTC m=+146.610266475 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:32 crc kubenswrapper[4875]: I1126 00:09:32.920912 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:32 crc kubenswrapper[4875]: E1126 00:09:32.921529 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:33.421518811 +0000 UTC m=+146.611494506 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.023646 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:33 crc kubenswrapper[4875]: E1126 00:09:33.024084 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:33.52405457 +0000 UTC m=+146.714030265 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.125848 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:33 crc kubenswrapper[4875]: E1126 00:09:33.126323 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:33.62630452 +0000 UTC m=+146.816280215 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.227487 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:33 crc kubenswrapper[4875]: E1126 00:09:33.228017 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:33.727770692 +0000 UTC m=+146.917746397 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.228319 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:33 crc kubenswrapper[4875]: E1126 00:09:33.228374 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:33.728360197 +0000 UTC m=+146.918335892 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.229794 4875 patch_prober.go:28] interesting pod/router-default-5444994796-bplfw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 00:09:33 crc kubenswrapper[4875]: [-]has-synced failed: reason withheld Nov 26 00:09:33 crc kubenswrapper[4875]: [+]process-running ok Nov 26 00:09:33 crc kubenswrapper[4875]: healthz check failed Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.230131 4875 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bplfw" podUID="b226d68e-dbae-401a-88d4-ccc0cb1a51cf" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.329979 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:33 crc kubenswrapper[4875]: E1126 00:09:33.330157 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:33.830132345 +0000 UTC m=+147.020108050 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.330349 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:33 crc kubenswrapper[4875]: E1126 00:09:33.330758 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:33.830746761 +0000 UTC m=+147.020722456 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.428736 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-canary/ingress-canary-d5dcs" event={"ID":"6cdd823a-c12a-427c-8273-924e5295319c","Type":"ContainerStarted","Data":"45927c47021893d9f2fe18b8dc72e6d2d8d695b983012cde77046262cf8b4c42"} Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.431277 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:33 crc kubenswrapper[4875]: E1126 00:09:33.431692 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:33.931675937 +0000 UTC m=+147.121651632 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.435243 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-bsvcq" event={"ID":"85ee0261-b699-4159-9ee7-123628b1f1c0","Type":"ContainerStarted","Data":"00cd67b4c871238303c9488da7e66a03c3fedaf50bf62f310fb1508936a957c8"} Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.437552 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-6ml5v" event={"ID":"d725db0e-68dc-4655-ae0f-8e847ef651c3","Type":"ContainerStarted","Data":"3750e44f310d5540d69e1a766085b11f8211445703ac851ff330ac7ec2368c45"} Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.438905 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-6ml5v" Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.440650 4875 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-6ml5v container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.24:8080/healthz\": dial tcp 10.217.0.24:8080: connect: connection refused" start-of-body= Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.440721 4875 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-6ml5v" podUID="d725db0e-68dc-4655-ae0f-8e847ef651c3" containerName="marketplace-operator" probeResult="failure" 
output="Get \"http://10.217.0.24:8080/healthz\": dial tcp 10.217.0.24:8080: connect: connection refused" Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.441674 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-tkvbn" event={"ID":"bce1a18d-ad14-442b-afb3-78b1c7213379","Type":"ContainerStarted","Data":"f8661a2eca23674311af899156985f159d9132254c7ae746a22d80741804573a"} Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.444049 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-service-ca-operator/service-ca-operator-777779d784-btrqx" event={"ID":"dd4dda6b-c6c4-4716-80b2-95ce65914125","Type":"ContainerStarted","Data":"1dc293143c6e7274dac9f2f7f834c3e6293ba6412907bcaed75233f19fe9667a"} Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.449263 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401920-pfkvn" event={"ID":"601c8445-3908-4677-9df9-422f8e97d928","Type":"ContainerStarted","Data":"bcabc62376cc6ca6ddb76a0e359de381ea0bfbe911c20bf1081c563a5bb264b9"} Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.449621 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-canary/ingress-canary-d5dcs" podStartSLOduration=6.449594749 podStartE2EDuration="6.449594749s" podCreationTimestamp="2025-11-26 00:09:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:09:33.447749101 +0000 UTC m=+146.637724796" watchObservedRunningTime="2025-11-26 00:09:33.449594749 +0000 UTC m=+146.639570444" Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.461871 4875 generic.go:334] "Generic (PLEG): container finished" podID="2f69fc9c-de93-4cae-a491-50698ac10599" containerID="d9979ab2dd6f47c46ce980bada1ba49d99035618f10c3cf3b063ee71d321fe72" exitCode=0 Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.462623 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-l4vms" event={"ID":"2f69fc9c-de93-4cae-a491-50698ac10599","Type":"ContainerDied","Data":"d9979ab2dd6f47c46ce980bada1ba49d99035618f10c3cf3b063ee71d321fe72"} Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.489851 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-qxqtr" event={"ID":"1da5021c-2176-41cb-9c3c-d4ccbe299664","Type":"ContainerStarted","Data":"ff1e6c039c299e309a3d77444a073334f0dc9ef7bb55aa8076b0d3213140034e"} Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.493617 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-operator-74547568cd-bsvcq" podStartSLOduration=125.49359007 podStartE2EDuration="2m5.49359007s" podCreationTimestamp="2025-11-26 00:07:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:09:33.490865961 +0000 UTC m=+146.680841656" watchObservedRunningTime="2025-11-26 00:09:33.49359007 +0000 UTC m=+146.683565765" Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.526502 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-6ml5v" podStartSLOduration=125.526483177 podStartE2EDuration="2m5.526483177s" podCreationTimestamp="2025-11-26 
00:07:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:09:33.525170484 +0000 UTC m=+146.715146179" watchObservedRunningTime="2025-11-26 00:09:33.526483177 +0000 UTC m=+146.716458872" Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.538780 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:33 crc kubenswrapper[4875]: E1126 00:09:33.541165 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:34.041144714 +0000 UTC m=+147.231120619 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.571668 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-dns/dns-default-j5qr7" Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.571718 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-j5qr7" event={"ID":"888a01e1-690c-41d1-bcc2-1f1403a205dd","Type":"ContainerStarted","Data":"194a9935311c089af3fe82ef90c2683f65d85134ffea8c980a6972907e2e3010"} Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.571739 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-gd8c4" event={"ID":"baf3ba36-cf4a-4780-b954-913da501aab7","Type":"ContainerStarted","Data":"2ea2012c6ce3c131393e2214594753b96e869e4d1a015023017d042d27aff827"} Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.572047 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-tfknb" event={"ID":"b988074b-ff7d-4921-b9a7-bda74319fcc9","Type":"ContainerStarted","Data":"1d6435d7ce5a8af62fd57b244dfa0a0459be88879db639b8023bb49d31284fd9"} Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.587698 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-service-ca-operator/service-ca-operator-777779d784-btrqx" podStartSLOduration=125.587662981 podStartE2EDuration="2m5.587662981s" podCreationTimestamp="2025-11-26 00:07:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:09:33.568475497 +0000 UTC m=+146.758451192" watchObservedRunningTime="2025-11-26 00:09:33.587662981 +0000 UTC m=+146.777638666" Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.590968 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns-operator/dns-operator-744455d44c-8t466" 
event={"ID":"84a8202a-8464-4308-8ecb-dfb811933d5b","Type":"ContainerStarted","Data":"7a20cf44ab70d702770eb9f26ef863c10ce2398d56db9a154e78b875f10bc5ca"} Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.616441 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver-operator/kube-apiserver-operator-766d6c64bb-tkvbn" podStartSLOduration=125.61639991 podStartE2EDuration="2m5.61639991s" podCreationTimestamp="2025-11-26 00:07:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:09:33.613403743 +0000 UTC m=+146.803379438" watchObservedRunningTime="2025-11-26 00:09:33.61639991 +0000 UTC m=+146.806375605" Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.639861 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:33 crc kubenswrapper[4875]: E1126 00:09:33.642184 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:34.142163394 +0000 UTC m=+147.332139089 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.648681 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-dgkwj" event={"ID":"872d7004-c1ed-4411-bfcc-a50cfad48c96","Type":"ContainerStarted","Data":"70f506baacb2586f78175a4a871be7dfcc61afee54e1b99054a55ed56fca75ae"} Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.648738 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-dgkwj" event={"ID":"872d7004-c1ed-4411-bfcc-a50cfad48c96","Type":"ContainerStarted","Data":"543e37ac25c0e8199546edfa40883725407b89c28dceed8458b45b02fa72909e"} Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.648912 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/collect-profiles-29401920-pfkvn" podStartSLOduration=126.648895117 podStartE2EDuration="2m6.648895117s" podCreationTimestamp="2025-11-26 00:07:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:09:33.648135888 +0000 UTC m=+146.838111583" watchObservedRunningTime="2025-11-26 00:09:33.648895117 +0000 UTC m=+146.838870812" Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.655949 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console-operator/console-operator-58897d9998-w9htq" 
event={"ID":"2d2fae84-4466-4ef1-af30-b6df2e28f931","Type":"ContainerStarted","Data":"5439d6b0a2530cafa00b5b53f6ff347d7f656398d374f0592fef11d85ad0559f"} Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.658569 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console-operator/console-operator-58897d9998-w9htq" Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.663626 4875 patch_prober.go:28] interesting pod/console-operator-58897d9998-w9htq container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.19:8443/readyz\": dial tcp 10.217.0.19:8443: connect: connection refused" start-of-body= Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.663706 4875 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-w9htq" podUID="2d2fae84-4466-4ef1-af30-b6df2e28f931" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.19:8443/readyz\": dial tcp 10.217.0.19:8443: connect: connection refused" Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.677173 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/multus-admission-controller-857f4d67dd-tfknb" podStartSLOduration=125.677150324 podStartE2EDuration="2m5.677150324s" podCreationTimestamp="2025-11-26 00:07:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:09:33.674757062 +0000 UTC m=+146.864732777" watchObservedRunningTime="2025-11-26 00:09:33.677150324 +0000 UTC m=+146.867126019" Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.702217 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-zww4t" event={"ID":"49bb8ae9-ab85-458a-850e-8d3b48ab42de","Type":"ContainerStarted","Data":"c95c481c4387cef00e82b37a5d808bc1fb0ef477f26fd634ff482a0f7902bc1f"} Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.720487 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dbctw" event={"ID":"9d929481-cfc4-4d91-b925-ac3a9127e007","Type":"ContainerStarted","Data":"ab63bdf59d33e2ddce3abc4dc1363866d9a9bb05fc69acd98ad2035b2af92409"} Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.721261 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dbctw" Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.729811 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" event={"ID":"0a3941af-01d7-44c0-8e51-cc719e7f4835","Type":"ContainerStarted","Data":"af7676a12e680000a4bbfb6416b9279cf7d15c777d173b8abd6af2f459645244"} Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.730632 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.740730 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns-operator/dns-operator-744455d44c-8t466" podStartSLOduration=125.740690429 podStartE2EDuration="2m5.740690429s" podCreationTimestamp="2025-11-26 00:07:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" 
observedRunningTime="2025-11-26 00:09:33.736356517 +0000 UTC m=+146.926332212" watchObservedRunningTime="2025-11-26 00:09:33.740690429 +0000 UTC m=+146.930666124" Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.748826 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:33 crc kubenswrapper[4875]: E1126 00:09:33.751289 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:34.25126974 +0000 UTC m=+147.441245635 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.770348 4875 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-dbctw container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.37:8443/healthz\": dial tcp 10.217.0.37:8443: connect: connection refused" start-of-body= Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.770898 4875 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dbctw" podUID="9d929481-cfc4-4d91-b925-ac3a9127e007" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.37:8443/healthz\": dial tcp 10.217.0.37:8443: connect: connection refused" Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.780939 4875 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-v9znd container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.12:6443/healthz\": dial tcp 10.217.0.12:6443: connect: connection refused" start-of-body= Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.781001 4875 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" podUID="0a3941af-01d7-44c0-8e51-cc719e7f4835" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.12:6443/healthz\": dial tcp 10.217.0.12:6443: connect: connection refused" Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.784090 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-apiserver/apiserver-76f77b778f-8fcp4" event={"ID":"a1910aeb-3a24-4922-ada0-a47d871c0998","Type":"ContainerStarted","Data":"8d20f7f33c358ccfccc29e2abd692c24b22fab457d8a1e3b12ce840f234cfbc6"} Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.797200 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mpgjt" 
event={"ID":"b0ac0929-e6e0-464c-bb52-aadcea8d2f3e","Type":"ContainerStarted","Data":"655618e516fc183521c0a3da99b7ce9ddcebe11362f1ce875de1f6f6b50b91f0"} Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.800671 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-62q8r" Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.800708 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fgpzs" Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.801345 4875 patch_prober.go:28] interesting pod/downloads-7954f5f757-7s69q container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.801420 4875 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7s69q" podUID="b118a730-fb77-4632-83f3-9b94b4d52b2c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.815148 4875 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-62q8r container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.815237 4875 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-62q8r" podUID="d04a012b-fcc1-4f75-8e0a-4a4a17d9cf75" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.850093 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:33 crc kubenswrapper[4875]: E1126 00:09:33.852047 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:34.352026843 +0000 UTC m=+147.542002538 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.900911 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ingress-operator/ingress-operator-5b745b69d9-gd8c4" podStartSLOduration=125.90088147 podStartE2EDuration="2m5.90088147s" podCreationTimestamp="2025-11-26 00:07:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:09:33.835991071 +0000 UTC m=+147.025966766" watchObservedRunningTime="2025-11-26 00:09:33.90088147 +0000 UTC m=+147.090857165" Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.927865 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator/migrator-59844c95c7-qxqtr" podStartSLOduration=125.927845644 podStartE2EDuration="2m5.927845644s" podCreationTimestamp="2025-11-26 00:07:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:09:33.926270074 +0000 UTC m=+147.116245769" watchObservedRunningTime="2025-11-26 00:09:33.927845644 +0000 UTC m=+147.117821339" Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.955670 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:33 crc kubenswrapper[4875]: E1126 00:09:33.956722 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:34.456699307 +0000 UTC m=+147.646675002 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:33 crc kubenswrapper[4875]: I1126 00:09:33.986125 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-dns/dns-default-j5qr7" podStartSLOduration=7.986106973 podStartE2EDuration="7.986106973s" podCreationTimestamp="2025-11-26 00:09:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:09:33.984508272 +0000 UTC m=+147.174483967" watchObservedRunningTime="2025-11-26 00:09:33.986106973 +0000 UTC m=+147.176082668" Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.040961 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-apiserver/apiserver-76f77b778f-8fcp4" podStartSLOduration=127.040945364 podStartE2EDuration="2m7.040945364s" podCreationTimestamp="2025-11-26 00:07:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:09:34.022275213 +0000 UTC m=+147.212250908" watchObservedRunningTime="2025-11-26 00:09:34.040945364 +0000 UTC m=+147.230921059" Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.041475 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-storage-version-migrator-operator/kube-storage-version-migrator-operator-b67b599dd-xfnw9" podStartSLOduration=126.041471777 podStartE2EDuration="2m6.041471777s" podCreationTimestamp="2025-11-26 00:07:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:09:34.040216916 +0000 UTC m=+147.230192611" watchObservedRunningTime="2025-11-26 00:09:34.041471777 +0000 UTC m=+147.231447462" Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.057811 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:34 crc kubenswrapper[4875]: E1126 00:09:34.058214 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:34.558194638 +0000 UTC m=+147.748170333 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.086659 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dbctw" podStartSLOduration=126.086637009 podStartE2EDuration="2m6.086637009s" podCreationTimestamp="2025-11-26 00:07:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:09:34.08163241 +0000 UTC m=+147.271608115" watchObservedRunningTime="2025-11-26 00:09:34.086637009 +0000 UTC m=+147.276612704" Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.142547 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-samples-operator/cluster-samples-operator-665b6dd947-dgkwj" podStartSLOduration=127.142529458 podStartE2EDuration="2m7.142529458s" podCreationTimestamp="2025-11-26 00:07:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:09:34.117819802 +0000 UTC m=+147.307795517" watchObservedRunningTime="2025-11-26 00:09:34.142529458 +0000 UTC m=+147.332505153" Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.143246 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-config-operator/machine-config-controller-84d6567774-mpgjt" podStartSLOduration=126.143241286 podStartE2EDuration="2m6.143241286s" podCreationTimestamp="2025-11-26 00:07:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:09:34.140465894 +0000 UTC m=+147.330441599" watchObservedRunningTime="2025-11-26 00:09:34.143241286 +0000 UTC m=+147.333216981" Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.159261 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:34 crc kubenswrapper[4875]: E1126 00:09:34.159686 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:34.659671979 +0000 UTC m=+147.849647674 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.173164 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" podStartSLOduration=127.173147135 podStartE2EDuration="2m7.173147135s" podCreationTimestamp="2025-11-26 00:07:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:09:34.168715901 +0000 UTC m=+147.358691596" watchObservedRunningTime="2025-11-26 00:09:34.173147135 +0000 UTC m=+147.363122840" Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.197318 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication-operator/authentication-operator-69f744f599-55rwl" podStartSLOduration=127.197280427 podStartE2EDuration="2m7.197280427s" podCreationTimestamp="2025-11-26 00:07:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:09:34.197245316 +0000 UTC m=+147.387221011" watchObservedRunningTime="2025-11-26 00:09:34.197280427 +0000 UTC m=+147.387256122" Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.230980 4875 patch_prober.go:28] interesting pod/router-default-5444994796-bplfw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 00:09:34 crc kubenswrapper[4875]: [-]has-synced failed: reason withheld Nov 26 00:09:34 crc kubenswrapper[4875]: [+]process-running ok Nov 26 00:09:34 crc kubenswrapper[4875]: healthz check failed Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.231056 4875 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bplfw" podUID="b226d68e-dbae-401a-88d4-ccc0cb1a51cf" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.260387 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:34 crc kubenswrapper[4875]: E1126 00:09:34.260922 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:34.760902683 +0000 UTC m=+147.950878378 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.264176 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/cluster-image-registry-operator-dc59b4c8b-54wbh" podStartSLOduration=126.264143797 podStartE2EDuration="2m6.264143797s" podCreationTimestamp="2025-11-26 00:07:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:09:34.227509334 +0000 UTC m=+147.417485029" watchObservedRunningTime="2025-11-26 00:09:34.264143797 +0000 UTC m=+147.454119502" Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.283403 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-machine-api/control-plane-machine-set-operator-78cbb6b69f-rln4j" podStartSLOduration=126.283384741 podStartE2EDuration="2m6.283384741s" podCreationTimestamp="2025-11-26 00:07:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:09:34.282665163 +0000 UTC m=+147.472640878" watchObservedRunningTime="2025-11-26 00:09:34.283384741 +0000 UTC m=+147.473360436" Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.354850 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-scheduler-operator/openshift-kube-scheduler-operator-5fdd9b5758-8kdf7" podStartSLOduration=126.35483271 podStartE2EDuration="2m6.35483271s" podCreationTimestamp="2025-11-26 00:07:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:09:34.318075785 +0000 UTC m=+147.508051480" watchObservedRunningTime="2025-11-26 00:09:34.35483271 +0000 UTC m=+147.544808395" Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.355291 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-62q8r" podStartSLOduration=126.355286162 podStartE2EDuration="2m6.355286162s" podCreationTimestamp="2025-11-26 00:07:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:09:34.352690495 +0000 UTC m=+147.542666190" watchObservedRunningTime="2025-11-26 00:09:34.355286162 +0000 UTC m=+147.545261857" Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.362346 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:34 crc kubenswrapper[4875]: E1126 00:09:34.362772 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 
podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:34.862760794 +0000 UTC m=+148.052736489 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.377870 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-console-operator/console-operator-58897d9998-w9htq" podStartSLOduration=126.377808061 podStartE2EDuration="2m6.377808061s" podCreationTimestamp="2025-11-26 00:07:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:09:34.376818886 +0000 UTC m=+147.566794581" watchObservedRunningTime="2025-11-26 00:09:34.377808061 +0000 UTC m=+147.567783756" Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.400543 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fgpzs" podStartSLOduration=126.400522466 podStartE2EDuration="2m6.400522466s" podCreationTimestamp="2025-11-26 00:07:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:09:34.397531779 +0000 UTC m=+147.587507464" watchObservedRunningTime="2025-11-26 00:09:34.400522466 +0000 UTC m=+147.590498161" Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.420430 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-cluster-machine-approver/machine-approver-56656f9798-7dk2l" podStartSLOduration=128.420395667 podStartE2EDuration="2m8.420395667s" podCreationTimestamp="2025-11-26 00:07:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:09:34.417460111 +0000 UTC m=+147.607435806" watchObservedRunningTime="2025-11-26 00:09:34.420395667 +0000 UTC m=+147.610371362" Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.463514 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:34 crc kubenswrapper[4875]: E1126 00:09:34.463676 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:34.96365211 +0000 UTC m=+148.153627795 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.464201 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:34 crc kubenswrapper[4875]: E1126 00:09:34.464545 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:34.964531103 +0000 UTC m=+148.154506798 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.565349 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:34 crc kubenswrapper[4875]: E1126 00:09:34.565799 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:35.065767047 +0000 UTC m=+148.255742752 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.666765 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:34 crc kubenswrapper[4875]: E1126 00:09:34.667370 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:35.167356761 +0000 UTC m=+148.357332456 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.767896 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:34 crc kubenswrapper[4875]: E1126 00:09:34.768067 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:35.268047882 +0000 UTC m=+148.458023577 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.768286 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:34 crc kubenswrapper[4875]: E1126 00:09:34.768620 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:35.268610777 +0000 UTC m=+148.458586472 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.804457 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-admission-controller-857f4d67dd-tfknb" event={"ID":"b988074b-ff7d-4921-b9a7-bda74319fcc9","Type":"ContainerStarted","Data":"9db88d73f6b535dc23f6219ee87d15a0d840ba31dbe84e262b2057f5718077e3"} Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.806359 4875 generic.go:334] "Generic (PLEG): container finished" podID="49bb8ae9-ab85-458a-850e-8d3b48ab42de" containerID="c95c481c4387cef00e82b37a5d808bc1fb0ef477f26fd634ff482a0f7902bc1f" exitCode=0 Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.806433 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-zww4t" event={"ID":"49bb8ae9-ab85-458a-850e-8d3b48ab42de","Type":"ContainerDied","Data":"c95c481c4387cef00e82b37a5d808bc1fb0ef477f26fd634ff482a0f7902bc1f"} Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.806816 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-config-operator/openshift-config-operator-7777fb866f-zww4t" event={"ID":"49bb8ae9-ab85-458a-850e-8d3b48ab42de","Type":"ContainerStarted","Data":"1a324d581bc0b29a4d278abf07c2ef4a73840e28da65faa01e47e26d58677c86"} Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.806908 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-config-operator/openshift-config-operator-7777fb866f-zww4t" Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.808587 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-dns/dns-default-j5qr7" event={"ID":"888a01e1-690c-41d1-bcc2-1f1403a205dd","Type":"ContainerStarted","Data":"280536a43a7c17af4dafb151a75a522687f8f68ac116ae7ab4a22a1a9d29abe6"} Nov 26 00:09:34 crc kubenswrapper[4875]: 
I1126 00:09:34.810864 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-l4vms" event={"ID":"2f69fc9c-de93-4cae-a491-50698ac10599","Type":"ContainerStarted","Data":"ddbedaad54717a997fd9626e63e8951f5bd4ebd5c0730e32d495c2d5823fa7b4"} Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.812361 4875 patch_prober.go:28] interesting pod/olm-operator-6b444d44fb-dbctw container/olm-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.37:8443/healthz\": dial tcp 10.217.0.37:8443: connect: connection refused" start-of-body= Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.812479 4875 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dbctw" podUID="9d929481-cfc4-4d91-b925-ac3a9127e007" containerName="olm-operator" probeResult="failure" output="Get \"https://10.217.0.37:8443/healthz\": dial tcp 10.217.0.37:8443: connect: connection refused" Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.812616 4875 patch_prober.go:28] interesting pod/catalog-operator-68c6474976-62q8r container/catalog-operator namespace/openshift-operator-lifecycle-manager: Readiness probe status=failure output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" start-of-body= Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.812660 4875 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-62q8r" podUID="d04a012b-fcc1-4f75-8e0a-4a4a17d9cf75" containerName="catalog-operator" probeResult="failure" output="Get \"https://10.217.0.20:8443/healthz\": dial tcp 10.217.0.20:8443: connect: connection refused" Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.812734 4875 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-6ml5v container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.24:8080/healthz\": dial tcp 10.217.0.24:8080: connect: connection refused" start-of-body= Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.812760 4875 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-6ml5v" podUID="d725db0e-68dc-4655-ae0f-8e847ef651c3" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.24:8080/healthz\": dial tcp 10.217.0.24:8080: connect: connection refused" Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.812368 4875 patch_prober.go:28] interesting pod/console-operator-58897d9998-w9htq container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.19:8443/readyz\": dial tcp 10.217.0.19:8443: connect: connection refused" start-of-body= Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.812786 4875 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-w9htq" podUID="2d2fae84-4466-4ef1-af30-b6df2e28f931" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.19:8443/readyz\": dial tcp 10.217.0.19:8443: connect: connection refused" Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.814131 4875 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-v9znd container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get 
\"https://10.217.0.12:6443/healthz\": dial tcp 10.217.0.12:6443: connect: connection refused" start-of-body= Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.814611 4875 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" podUID="0a3941af-01d7-44c0-8e51-cc719e7f4835" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.12:6443/healthz\": dial tcp 10.217.0.12:6443: connect: connection refused" Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.840017 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-config-operator/openshift-config-operator-7777fb866f-zww4t" podStartSLOduration=127.839995643 podStartE2EDuration="2m7.839995643s" podCreationTimestamp="2025-11-26 00:07:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:09:34.836706429 +0000 UTC m=+148.026682124" watchObservedRunningTime="2025-11-26 00:09:34.839995643 +0000 UTC m=+148.029971338" Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.855506 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-l4vms" podStartSLOduration=126.855488052 podStartE2EDuration="2m6.855488052s" podCreationTimestamp="2025-11-26 00:07:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:09:34.854819564 +0000 UTC m=+148.044795269" watchObservedRunningTime="2025-11-26 00:09:34.855488052 +0000 UTC m=+148.045463747" Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.869191 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:34 crc kubenswrapper[4875]: E1126 00:09:34.869365 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:35.369320468 +0000 UTC m=+148.559296163 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.869682 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:34 crc kubenswrapper[4875]: E1126 00:09:34.871727 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:35.371715499 +0000 UTC m=+148.561691194 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.977021 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:34 crc kubenswrapper[4875]: E1126 00:09:34.977213 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:35.477195803 +0000 UTC m=+148.667171498 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:34 crc kubenswrapper[4875]: I1126 00:09:34.977304 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:34 crc kubenswrapper[4875]: E1126 00:09:34.977633 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:35.477626224 +0000 UTC m=+148.667601919 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:35 crc kubenswrapper[4875]: I1126 00:09:35.080542 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:35 crc kubenswrapper[4875]: E1126 00:09:35.080639 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:35.580624554 +0000 UTC m=+148.770600249 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:35 crc kubenswrapper[4875]: I1126 00:09:35.080845 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:35 crc kubenswrapper[4875]: E1126 00:09:35.081137 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:35.581127997 +0000 UTC m=+148.771103692 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:35 crc kubenswrapper[4875]: I1126 00:09:35.182222 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:35 crc kubenswrapper[4875]: E1126 00:09:35.182980 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:35.682965668 +0000 UTC m=+148.872941363 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:35 crc kubenswrapper[4875]: I1126 00:09:35.224546 4875 patch_prober.go:28] interesting pod/router-default-5444994796-bplfw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 00:09:35 crc kubenswrapper[4875]: [-]has-synced failed: reason withheld Nov 26 00:09:35 crc kubenswrapper[4875]: [+]process-running ok Nov 26 00:09:35 crc kubenswrapper[4875]: healthz check failed Nov 26 00:09:35 crc kubenswrapper[4875]: I1126 00:09:35.224597 4875 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bplfw" podUID="b226d68e-dbae-401a-88d4-ccc0cb1a51cf" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 00:09:35 crc kubenswrapper[4875]: I1126 00:09:35.284119 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:35 crc kubenswrapper[4875]: E1126 00:09:35.284549 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:35.784537761 +0000 UTC m=+148.974513446 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:35 crc kubenswrapper[4875]: I1126 00:09:35.384806 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:35 crc kubenswrapper[4875]: E1126 00:09:35.384907 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:35.884892173 +0000 UTC m=+149.074867868 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:35 crc kubenswrapper[4875]: I1126 00:09:35.385165 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:35 crc kubenswrapper[4875]: I1126 00:09:35.385188 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:09:35 crc kubenswrapper[4875]: E1126 00:09:35.385437 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:35.885429717 +0000 UTC m=+149.075405412 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:35 crc kubenswrapper[4875]: I1126 00:09:35.385581 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:09:35 crc kubenswrapper[4875]: I1126 00:09:35.386370 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:09:35 crc kubenswrapper[4875]: I1126 00:09:35.386427 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:09:35 crc kubenswrapper[4875]: I1126 00:09:35.387585 4875 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"nginx-conf\" (UniqueName: \"kubernetes.io/configmap/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-nginx-conf\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:09:35 crc kubenswrapper[4875]: I1126 00:09:35.396575 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cqllr\" (UniqueName: \"kubernetes.io/projected/3b6479f0-333b-4a96-9adf-2099afdc2447-kube-api-access-cqllr\") pod \"network-check-target-xd92c\" (UID: \"3b6479f0-333b-4a96-9adf-2099afdc2447\") " pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:09:35 crc kubenswrapper[4875]: I1126 00:09:35.410963 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"networking-console-plugin-cert\" (UniqueName: \"kubernetes.io/secret/5fe485a1-e14f-4c09-b5b9-f252bc42b7e8-networking-console-plugin-cert\") pod \"networking-console-plugin-85b44fc459-gdk6g\" (UID: \"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8\") " pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:09:35 crc kubenswrapper[4875]: I1126 00:09:35.411308 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s2dwl\" (UniqueName: \"kubernetes.io/projected/9d751cbb-f2e2-430d-9754-c882a5e924a5-kube-api-access-s2dwl\") pod \"network-check-source-55646444c4-trplf\" (UID: \"9d751cbb-f2e2-430d-9754-c882a5e924a5\") " pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:09:35 crc kubenswrapper[4875]: I1126 00:09:35.446792 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" Nov 26 00:09:35 crc kubenswrapper[4875]: I1126 00:09:35.458480 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:09:35 crc kubenswrapper[4875]: I1126 00:09:35.475933 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" Nov 26 00:09:35 crc kubenswrapper[4875]: I1126 00:09:35.487057 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:35 crc kubenswrapper[4875]: E1126 00:09:35.487338 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:35.987322058 +0000 UTC m=+149.177297753 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:35 crc kubenswrapper[4875]: I1126 00:09:35.588816 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:35 crc kubenswrapper[4875]: E1126 00:09:35.589549 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:36.089536619 +0000 UTC m=+149.279512314 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:35 crc kubenswrapper[4875]: I1126 00:09:35.677374 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 26 00:09:35 crc kubenswrapper[4875]: I1126 00:09:35.679290 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 00:09:35 crc kubenswrapper[4875]: I1126 00:09:35.686522 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager"/"installer-sa-dockercfg-kjl2n" Nov 26 00:09:35 crc kubenswrapper[4875]: I1126 00:09:35.686850 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager"/"kube-root-ca.crt" Nov 26 00:09:35 crc kubenswrapper[4875]: I1126 00:09:35.695186 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:35 crc kubenswrapper[4875]: E1126 00:09:35.695618 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:36.195600798 +0000 UTC m=+149.385576493 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:35 crc kubenswrapper[4875]: I1126 00:09:35.704067 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 26 00:09:35 crc kubenswrapper[4875]: I1126 00:09:35.797310 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4890311c-7d8d-4ad9-b05d-d8de035e07e2-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"4890311c-7d8d-4ad9-b05d-d8de035e07e2\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 00:09:35 crc kubenswrapper[4875]: I1126 00:09:35.797726 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:35 crc kubenswrapper[4875]: I1126 00:09:35.797759 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4890311c-7d8d-4ad9-b05d-d8de035e07e2-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"4890311c-7d8d-4ad9-b05d-d8de035e07e2\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 00:09:35 crc kubenswrapper[4875]: E1126 00:09:35.798043 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:36.298032663 +0000 UTC m=+149.488008358 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:35 crc kubenswrapper[4875]: I1126 00:09:35.846400 4875 patch_prober.go:28] interesting pod/marketplace-operator-79b997595-6ml5v container/marketplace-operator namespace/openshift-marketplace: Readiness probe status=failure output="Get \"http://10.217.0.24:8080/healthz\": dial tcp 10.217.0.24:8080: connect: connection refused" start-of-body= Nov 26 00:09:35 crc kubenswrapper[4875]: I1126 00:09:35.846525 4875 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-marketplace/marketplace-operator-79b997595-6ml5v" podUID="d725db0e-68dc-4655-ae0f-8e847ef651c3" containerName="marketplace-operator" probeResult="failure" output="Get \"http://10.217.0.24:8080/healthz\": dial tcp 10.217.0.24:8080: connect: connection refused" Nov 26 00:09:35 crc kubenswrapper[4875]: I1126 00:09:35.847734 4875 patch_prober.go:28] interesting pod/console-operator-58897d9998-w9htq container/console-operator namespace/openshift-console-operator: Readiness probe status=failure output="Get \"https://10.217.0.19:8443/readyz\": dial tcp 10.217.0.19:8443: connect: connection refused" start-of-body= Nov 26 00:09:35 crc kubenswrapper[4875]: I1126 00:09:35.847774 4875 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console-operator/console-operator-58897d9998-w9htq" podUID="2d2fae84-4466-4ef1-af30-b6df2e28f931" containerName="console-operator" probeResult="failure" output="Get \"https://10.217.0.19:8443/readyz\": dial tcp 10.217.0.19:8443: connect: connection refused" Nov 26 00:09:35 crc kubenswrapper[4875]: I1126 00:09:35.850106 4875 patch_prober.go:28] interesting pod/oauth-openshift-558db77b4-v9znd container/oauth-openshift namespace/openshift-authentication: Readiness probe status=failure output="Get \"https://10.217.0.12:6443/healthz\": dial tcp 10.217.0.12:6443: connect: connection refused" start-of-body= Nov 26 00:09:35 crc kubenswrapper[4875]: I1126 00:09:35.850309 4875 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" podUID="0a3941af-01d7-44c0-8e51-cc719e7f4835" containerName="oauth-openshift" probeResult="failure" output="Get \"https://10.217.0.12:6443/healthz\": dial tcp 10.217.0.12:6443: connect: connection refused" Nov 26 00:09:35 crc kubenswrapper[4875]: I1126 00:09:35.900842 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:35 crc kubenswrapper[4875]: I1126 00:09:35.901097 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4890311c-7d8d-4ad9-b05d-d8de035e07e2-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"4890311c-7d8d-4ad9-b05d-d8de035e07e2\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 00:09:35 crc kubenswrapper[4875]: I1126 00:09:35.901460 
4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4890311c-7d8d-4ad9-b05d-d8de035e07e2-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"4890311c-7d8d-4ad9-b05d-d8de035e07e2\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 00:09:35 crc kubenswrapper[4875]: E1126 00:09:35.902818 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:36.402790938 +0000 UTC m=+149.592766683 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:35 crc kubenswrapper[4875]: I1126 00:09:35.906386 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4890311c-7d8d-4ad9-b05d-d8de035e07e2-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"4890311c-7d8d-4ad9-b05d-d8de035e07e2\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 00:09:35 crc kubenswrapper[4875]: I1126 00:09:35.995757 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4890311c-7d8d-4ad9-b05d-d8de035e07e2-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"4890311c-7d8d-4ad9-b05d-d8de035e07e2\") " pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 00:09:36 crc kubenswrapper[4875]: I1126 00:09:36.003838 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:36 crc kubenswrapper[4875]: E1126 00:09:36.004371 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:36.504350952 +0000 UTC m=+149.694326647 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:36 crc kubenswrapper[4875]: I1126 00:09:36.056655 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 00:09:36 crc kubenswrapper[4875]: I1126 00:09:36.105987 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:36 crc kubenswrapper[4875]: E1126 00:09:36.106352 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:36.606334715 +0000 UTC m=+149.796310420 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:36 crc kubenswrapper[4875]: I1126 00:09:36.208141 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:36 crc kubenswrapper[4875]: E1126 00:09:36.209732 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:36.709718115 +0000 UTC m=+149.899693810 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:36 crc kubenswrapper[4875]: I1126 00:09:36.234707 4875 patch_prober.go:28] interesting pod/router-default-5444994796-bplfw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 00:09:36 crc kubenswrapper[4875]: [-]has-synced failed: reason withheld Nov 26 00:09:36 crc kubenswrapper[4875]: [+]process-running ok Nov 26 00:09:36 crc kubenswrapper[4875]: healthz check failed Nov 26 00:09:36 crc kubenswrapper[4875]: I1126 00:09:36.234765 4875 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bplfw" podUID="b226d68e-dbae-401a-88d4-ccc0cb1a51cf" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 00:09:36 crc kubenswrapper[4875]: W1126 00:09:36.234937 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3b6479f0_333b_4a96_9adf_2099afdc2447.slice/crio-09b8beea811d664848621e0841b34e6864b2a0bbb06413ca019a5d0c59f98072 WatchSource:0}: Error finding container 09b8beea811d664848621e0841b34e6864b2a0bbb06413ca019a5d0c59f98072: Status 404 returned error can't find the container with id 09b8beea811d664848621e0841b34e6864b2a0bbb06413ca019a5d0c59f98072 Nov 26 00:09:36 crc kubenswrapper[4875]: I1126 00:09:36.316395 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:36 crc kubenswrapper[4875]: E1126 00:09:36.317047 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:36.817029496 +0000 UTC m=+150.007005191 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:36 crc kubenswrapper[4875]: I1126 00:09:36.420390 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:36 crc kubenswrapper[4875]: E1126 00:09:36.420781 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:36.920765275 +0000 UTC m=+150.110740960 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:36 crc kubenswrapper[4875]: I1126 00:09:36.521233 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:36 crc kubenswrapper[4875]: E1126 00:09:36.522094 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:37.022079383 +0000 UTC m=+150.212055078 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:36 crc kubenswrapper[4875]: I1126 00:09:36.623154 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:36 crc kubenswrapper[4875]: E1126 00:09:36.623590 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:37.123562384 +0000 UTC m=+150.313538079 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:36 crc kubenswrapper[4875]: I1126 00:09:36.726325 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:36 crc kubenswrapper[4875]: E1126 00:09:36.727488 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:37.227460087 +0000 UTC m=+150.417435792 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:36 crc kubenswrapper[4875]: I1126 00:09:36.740100 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-sgwj7"] Nov 26 00:09:36 crc kubenswrapper[4875]: I1126 00:09:36.741285 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-sgwj7" Nov 26 00:09:36 crc kubenswrapper[4875]: I1126 00:09:36.774389 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 26 00:09:36 crc kubenswrapper[4875]: I1126 00:09:36.831863 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:36 crc kubenswrapper[4875]: I1126 00:09:36.831971 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b9a62657-d64a-4306-ba77-17417763f811-utilities\") pod \"certified-operators-sgwj7\" (UID: \"b9a62657-d64a-4306-ba77-17417763f811\") " pod="openshift-marketplace/certified-operators-sgwj7" Nov 26 00:09:36 crc kubenswrapper[4875]: I1126 00:09:36.832019 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-r9ld4\" (UniqueName: \"kubernetes.io/projected/b9a62657-d64a-4306-ba77-17417763f811-kube-api-access-r9ld4\") pod \"certified-operators-sgwj7\" (UID: \"b9a62657-d64a-4306-ba77-17417763f811\") " pod="openshift-marketplace/certified-operators-sgwj7" Nov 26 00:09:36 crc kubenswrapper[4875]: I1126 00:09:36.832050 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b9a62657-d64a-4306-ba77-17417763f811-catalog-content\") pod \"certified-operators-sgwj7\" (UID: \"b9a62657-d64a-4306-ba77-17417763f811\") " pod="openshift-marketplace/certified-operators-sgwj7" Nov 26 00:09:36 crc kubenswrapper[4875]: E1126 00:09:36.832548 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:37.33253078 +0000 UTC m=+150.522506475 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:36 crc kubenswrapper[4875]: I1126 00:09:36.849393 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-sgwj7"] Nov 26 00:09:36 crc kubenswrapper[4875]: I1126 00:09:36.858567 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-wlmp9" event={"ID":"f31ac9d7-e1d6-413f-8749-4f3f13be75c9","Type":"ContainerStarted","Data":"0fff39c6543b27b993385cdc277da7d05f329d8a88d2585294d6509a8a652862"} Nov 26 00:09:36 crc kubenswrapper[4875]: I1126 00:09:36.859837 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"9546e70ef1ed35144892092b94f71d4586e7f12441d9fb4d96cbafd4d7b35b46"} Nov 26 00:09:36 crc kubenswrapper[4875]: I1126 00:09:36.859867 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-source-55646444c4-trplf" event={"ID":"9d751cbb-f2e2-430d-9754-c882a5e924a5","Type":"ContainerStarted","Data":"64460a867d08736c7eaab53cca3466ac43c300ba894c1d739fcc65055f00aa67"} Nov 26 00:09:36 crc kubenswrapper[4875]: I1126 00:09:36.861332 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"49c158842a55459f656d17d46906089243f308df925f8a7e8b6536d5e470fcc4"} Nov 26 00:09:36 crc kubenswrapper[4875]: I1126 00:09:36.861356 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-console/networking-console-plugin-85b44fc459-gdk6g" event={"ID":"5fe485a1-e14f-4c09-b5b9-f252bc42b7e8","Type":"ContainerStarted","Data":"53528a6cf0ddba0cf40c05048661447d18b5d8be0c8476e9cb18af53dc79cafa"} Nov 26 00:09:36 crc kubenswrapper[4875]: I1126 00:09:36.862986 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"dfb3ab76b2e1230013270154a2977d0f17ebb35c52c7fe46b88895d46b0c3fc6"} Nov 26 00:09:36 crc kubenswrapper[4875]: I1126 00:09:36.863010 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-network-diagnostics/network-check-target-xd92c" event={"ID":"3b6479f0-333b-4a96-9adf-2099afdc2447","Type":"ContainerStarted","Data":"09b8beea811d664848621e0841b34e6864b2a0bbb06413ca019a5d0c59f98072"} Nov 26 00:09:36 crc kubenswrapper[4875]: I1126 00:09:36.863285 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:09:36 crc kubenswrapper[4875]: I1126 00:09:36.875208 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-controller-manager/revision-pruner-9-crc"] Nov 26 00:09:36 crc kubenswrapper[4875]: I1126 00:09:36.936609 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:36 crc kubenswrapper[4875]: I1126 00:09:36.937029 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b9a62657-d64a-4306-ba77-17417763f811-utilities\") pod \"certified-operators-sgwj7\" (UID: \"b9a62657-d64a-4306-ba77-17417763f811\") " pod="openshift-marketplace/certified-operators-sgwj7" Nov 26 00:09:36 crc kubenswrapper[4875]: I1126 00:09:36.937081 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-r9ld4\" (UniqueName: \"kubernetes.io/projected/b9a62657-d64a-4306-ba77-17417763f811-kube-api-access-r9ld4\") pod \"certified-operators-sgwj7\" (UID: \"b9a62657-d64a-4306-ba77-17417763f811\") " pod="openshift-marketplace/certified-operators-sgwj7" Nov 26 00:09:36 crc kubenswrapper[4875]: I1126 00:09:36.937116 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b9a62657-d64a-4306-ba77-17417763f811-catalog-content\") pod \"certified-operators-sgwj7\" (UID: \"b9a62657-d64a-4306-ba77-17417763f811\") " pod="openshift-marketplace/certified-operators-sgwj7" Nov 26 00:09:36 crc kubenswrapper[4875]: E1126 00:09:36.938011 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:37.437985634 +0000 UTC m=+150.627961329 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:36 crc kubenswrapper[4875]: I1126 00:09:36.939085 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b9a62657-d64a-4306-ba77-17417763f811-catalog-content\") pod \"certified-operators-sgwj7\" (UID: \"b9a62657-d64a-4306-ba77-17417763f811\") " pod="openshift-marketplace/certified-operators-sgwj7" Nov 26 00:09:36 crc kubenswrapper[4875]: I1126 00:09:36.939186 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b9a62657-d64a-4306-ba77-17417763f811-utilities\") pod \"certified-operators-sgwj7\" (UID: \"b9a62657-d64a-4306-ba77-17417763f811\") " pod="openshift-marketplace/certified-operators-sgwj7" Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.020130 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-r9ld4\" (UniqueName: \"kubernetes.io/projected/b9a62657-d64a-4306-ba77-17417763f811-kube-api-access-r9ld4\") pod \"certified-operators-sgwj7\" (UID: \"b9a62657-d64a-4306-ba77-17417763f811\") " pod="openshift-marketplace/certified-operators-sgwj7" Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.038971 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:37 crc kubenswrapper[4875]: E1126 00:09:37.040867 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:37.54084608 +0000 UTC m=+150.730821775 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.093337 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-wx746"] Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.095496 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wx746" Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.119483 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-sgwj7" Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.144077 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:37 crc kubenswrapper[4875]: E1126 00:09:37.144243 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:37.64420706 +0000 UTC m=+150.834182755 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.144368 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8bd0220e-3135-4767-b60b-e425535026fc-utilities\") pod \"certified-operators-wx746\" (UID: \"8bd0220e-3135-4767-b60b-e425535026fc\") " pod="openshift-marketplace/certified-operators-wx746" Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.144483 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.144526 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-rqbvh\" (UniqueName: \"kubernetes.io/projected/8bd0220e-3135-4767-b60b-e425535026fc-kube-api-access-rqbvh\") pod \"certified-operators-wx746\" (UID: \"8bd0220e-3135-4767-b60b-e425535026fc\") " pod="openshift-marketplace/certified-operators-wx746" Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.144572 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8bd0220e-3135-4767-b60b-e425535026fc-catalog-content\") pod \"certified-operators-wx746\" (UID: \"8bd0220e-3135-4767-b60b-e425535026fc\") " pod="openshift-marketplace/certified-operators-wx746" Nov 26 00:09:37 crc kubenswrapper[4875]: E1126 00:09:37.144971 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:37.644951639 +0000 UTC m=+150.834927334 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.183171 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wx746"] Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.236725 4875 patch_prober.go:28] interesting pod/router-default-5444994796-bplfw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 00:09:37 crc kubenswrapper[4875]: [-]has-synced failed: reason withheld Nov 26 00:09:37 crc kubenswrapper[4875]: [+]process-running ok Nov 26 00:09:37 crc kubenswrapper[4875]: healthz check failed Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.236812 4875 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bplfw" podUID="b226d68e-dbae-401a-88d4-ccc0cb1a51cf" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.247044 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:37 crc kubenswrapper[4875]: E1126 00:09:37.247358 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:37.747331762 +0000 UTC m=+150.937307457 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.247404 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-rqbvh\" (UniqueName: \"kubernetes.io/projected/8bd0220e-3135-4767-b60b-e425535026fc-kube-api-access-rqbvh\") pod \"certified-operators-wx746\" (UID: \"8bd0220e-3135-4767-b60b-e425535026fc\") " pod="openshift-marketplace/certified-operators-wx746" Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.247529 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8bd0220e-3135-4767-b60b-e425535026fc-catalog-content\") pod \"certified-operators-wx746\" (UID: \"8bd0220e-3135-4767-b60b-e425535026fc\") " pod="openshift-marketplace/certified-operators-wx746" Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.247598 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8bd0220e-3135-4767-b60b-e425535026fc-utilities\") pod \"certified-operators-wx746\" (UID: \"8bd0220e-3135-4767-b60b-e425535026fc\") " pod="openshift-marketplace/certified-operators-wx746" Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.248073 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8bd0220e-3135-4767-b60b-e425535026fc-utilities\") pod \"certified-operators-wx746\" (UID: \"8bd0220e-3135-4767-b60b-e425535026fc\") " pod="openshift-marketplace/certified-operators-wx746" Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.248368 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8bd0220e-3135-4767-b60b-e425535026fc-catalog-content\") pod \"certified-operators-wx746\" (UID: \"8bd0220e-3135-4767-b60b-e425535026fc\") " pod="openshift-marketplace/certified-operators-wx746" Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.288217 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-rqbvh\" (UniqueName: \"kubernetes.io/projected/8bd0220e-3135-4767-b60b-e425535026fc-kube-api-access-rqbvh\") pod \"certified-operators-wx746\" (UID: \"8bd0220e-3135-4767-b60b-e425535026fc\") " pod="openshift-marketplace/certified-operators-wx746" Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.349626 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:37 crc kubenswrapper[4875]: E1126 00:09:37.350132 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. 
No retries permitted until 2025-11-26 00:09:37.850114997 +0000 UTC m=+151.040090692 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.429931 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wx746" Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.451582 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:37 crc kubenswrapper[4875]: E1126 00:09:37.451782 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:37.951753022 +0000 UTC m=+151.141728717 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.452038 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:37 crc kubenswrapper[4875]: E1126 00:09:37.452394 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:37.952387409 +0000 UTC m=+151.142363104 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.511464 4875 patch_prober.go:28] interesting pod/machine-config-daemon-9mztb container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.511711 4875 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.553099 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:37 crc kubenswrapper[4875]: E1126 00:09:37.553462 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:38.053433739 +0000 UTC m=+151.243409434 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.654372 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:37 crc kubenswrapper[4875]: E1126 00:09:37.654784 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:38.154768136 +0000 UTC m=+151.344743831 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.696078 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-b4kmf"] Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.697072 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-b4kmf" Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.708260 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.739076 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-b4kmf"] Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.757310 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.757458 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mcrm2\" (UniqueName: \"kubernetes.io/projected/830b0136-66d9-47cb-9a99-f858deae4a28-kube-api-access-mcrm2\") pod \"community-operators-b4kmf\" (UID: \"830b0136-66d9-47cb-9a99-f858deae4a28\") " pod="openshift-marketplace/community-operators-b4kmf" Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.757491 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/830b0136-66d9-47cb-9a99-f858deae4a28-utilities\") pod \"community-operators-b4kmf\" (UID: \"830b0136-66d9-47cb-9a99-f858deae4a28\") " pod="openshift-marketplace/community-operators-b4kmf" Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.757515 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/830b0136-66d9-47cb-9a99-f858deae4a28-catalog-content\") pod \"community-operators-b4kmf\" (UID: \"830b0136-66d9-47cb-9a99-f858deae4a28\") " pod="openshift-marketplace/community-operators-b4kmf" Nov 26 00:09:37 crc kubenswrapper[4875]: E1126 00:09:37.757655 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:38.257638743 +0000 UTC m=+151.447614438 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.870732 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/830b0136-66d9-47cb-9a99-f858deae4a28-catalog-content\") pod \"community-operators-b4kmf\" (UID: \"830b0136-66d9-47cb-9a99-f858deae4a28\") " pod="openshift-marketplace/community-operators-b4kmf" Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.870818 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mcrm2\" (UniqueName: \"kubernetes.io/projected/830b0136-66d9-47cb-9a99-f858deae4a28-kube-api-access-mcrm2\") pod \"community-operators-b4kmf\" (UID: \"830b0136-66d9-47cb-9a99-f858deae4a28\") " pod="openshift-marketplace/community-operators-b4kmf" Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.870837 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/830b0136-66d9-47cb-9a99-f858deae4a28-utilities\") pod \"community-operators-b4kmf\" (UID: \"830b0136-66d9-47cb-9a99-f858deae4a28\") " pod="openshift-marketplace/community-operators-b4kmf" Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.870856 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:37 crc kubenswrapper[4875]: E1126 00:09:37.871077 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:38.371067341 +0000 UTC m=+151.561043036 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.871555 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/830b0136-66d9-47cb-9a99-f858deae4a28-catalog-content\") pod \"community-operators-b4kmf\" (UID: \"830b0136-66d9-47cb-9a99-f858deae4a28\") " pod="openshift-marketplace/community-operators-b4kmf" Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.871969 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/830b0136-66d9-47cb-9a99-f858deae4a28-utilities\") pod \"community-operators-b4kmf\" (UID: \"830b0136-66d9-47cb-9a99-f858deae4a28\") " pod="openshift-marketplace/community-operators-b4kmf" Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.921666 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"4890311c-7d8d-4ad9-b05d-d8de035e07e2","Type":"ContainerStarted","Data":"1542e5ef87f62e9a92da223d6806dc954cb11d140cbee3ec1f54ad65e566cff7"} Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.921751 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"4890311c-7d8d-4ad9-b05d-d8de035e07e2","Type":"ContainerStarted","Data":"59a775b65b5fdc05bbced2610af9812c6c78ef40f3ae18a4c7fdc15f6dff46cc"} Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.963009 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mcrm2\" (UniqueName: \"kubernetes.io/projected/830b0136-66d9-47cb-9a99-f858deae4a28-kube-api-access-mcrm2\") pod \"community-operators-b4kmf\" (UID: \"830b0136-66d9-47cb-9a99-f858deae4a28\") " pod="openshift-marketplace/community-operators-b4kmf" Nov 26 00:09:37 crc kubenswrapper[4875]: I1126 00:09:37.979676 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:37 crc kubenswrapper[4875]: E1126 00:09:37.979975 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:38.479960923 +0000 UTC m=+151.669936618 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:38 crc kubenswrapper[4875]: I1126 00:09:38.081129 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:38 crc kubenswrapper[4875]: E1126 00:09:38.082855 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:38.58283392 +0000 UTC m=+151.772809615 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:38 crc kubenswrapper[4875]: I1126 00:09:38.086730 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-controller-manager/revision-pruner-9-crc" podStartSLOduration=3.08670967 podStartE2EDuration="3.08670967s" podCreationTimestamp="2025-11-26 00:09:35 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:09:37.979251615 +0000 UTC m=+151.169227300" watchObservedRunningTime="2025-11-26 00:09:38.08670967 +0000 UTC m=+151.276685365" Nov 26 00:09:38 crc kubenswrapper[4875]: I1126 00:09:38.089633 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-bbw4f"] Nov 26 00:09:38 crc kubenswrapper[4875]: I1126 00:09:38.090967 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bbw4f" Nov 26 00:09:38 crc kubenswrapper[4875]: I1126 00:09:38.106391 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bbw4f"] Nov 26 00:09:38 crc kubenswrapper[4875]: I1126 00:09:38.133348 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-sgwj7"] Nov 26 00:09:38 crc kubenswrapper[4875]: I1126 00:09:38.153229 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-b4kmf" Nov 26 00:09:38 crc kubenswrapper[4875]: I1126 00:09:38.182046 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:38 crc kubenswrapper[4875]: I1126 00:09:38.182268 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3db2a450-bce1-422c-bbf4-8676b8662c2e-utilities\") pod \"community-operators-bbw4f\" (UID: \"3db2a450-bce1-422c-bbf4-8676b8662c2e\") " pod="openshift-marketplace/community-operators-bbw4f" Nov 26 00:09:38 crc kubenswrapper[4875]: I1126 00:09:38.182330 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3db2a450-bce1-422c-bbf4-8676b8662c2e-catalog-content\") pod \"community-operators-bbw4f\" (UID: \"3db2a450-bce1-422c-bbf4-8676b8662c2e\") " pod="openshift-marketplace/community-operators-bbw4f" Nov 26 00:09:38 crc kubenswrapper[4875]: I1126 00:09:38.182381 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nvsjg\" (UniqueName: \"kubernetes.io/projected/3db2a450-bce1-422c-bbf4-8676b8662c2e-kube-api-access-nvsjg\") pod \"community-operators-bbw4f\" (UID: \"3db2a450-bce1-422c-bbf4-8676b8662c2e\") " pod="openshift-marketplace/community-operators-bbw4f" Nov 26 00:09:38 crc kubenswrapper[4875]: E1126 00:09:38.182503 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:38.682486204 +0000 UTC m=+151.872461889 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:38 crc kubenswrapper[4875]: I1126 00:09:38.229726 4875 patch_prober.go:28] interesting pod/router-default-5444994796-bplfw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 00:09:38 crc kubenswrapper[4875]: [-]has-synced failed: reason withheld Nov 26 00:09:38 crc kubenswrapper[4875]: [+]process-running ok Nov 26 00:09:38 crc kubenswrapper[4875]: healthz check failed Nov 26 00:09:38 crc kubenswrapper[4875]: I1126 00:09:38.229785 4875 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bplfw" podUID="b226d68e-dbae-401a-88d4-ccc0cb1a51cf" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 00:09:38 crc kubenswrapper[4875]: I1126 00:09:38.288074 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3db2a450-bce1-422c-bbf4-8676b8662c2e-catalog-content\") pod \"community-operators-bbw4f\" (UID: \"3db2a450-bce1-422c-bbf4-8676b8662c2e\") " pod="openshift-marketplace/community-operators-bbw4f" Nov 26 00:09:38 crc kubenswrapper[4875]: I1126 00:09:38.288588 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nvsjg\" (UniqueName: \"kubernetes.io/projected/3db2a450-bce1-422c-bbf4-8676b8662c2e-kube-api-access-nvsjg\") pod \"community-operators-bbw4f\" (UID: \"3db2a450-bce1-422c-bbf4-8676b8662c2e\") " pod="openshift-marketplace/community-operators-bbw4f" Nov 26 00:09:38 crc kubenswrapper[4875]: I1126 00:09:38.288619 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:38 crc kubenswrapper[4875]: I1126 00:09:38.288676 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3db2a450-bce1-422c-bbf4-8676b8662c2e-utilities\") pod \"community-operators-bbw4f\" (UID: \"3db2a450-bce1-422c-bbf4-8676b8662c2e\") " pod="openshift-marketplace/community-operators-bbw4f" Nov 26 00:09:38 crc kubenswrapper[4875]: I1126 00:09:38.288779 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3db2a450-bce1-422c-bbf4-8676b8662c2e-catalog-content\") pod \"community-operators-bbw4f\" (UID: \"3db2a450-bce1-422c-bbf4-8676b8662c2e\") " pod="openshift-marketplace/community-operators-bbw4f" Nov 26 00:09:38 crc kubenswrapper[4875]: I1126 00:09:38.289078 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3db2a450-bce1-422c-bbf4-8676b8662c2e-utilities\") pod 
\"community-operators-bbw4f\" (UID: \"3db2a450-bce1-422c-bbf4-8676b8662c2e\") " pod="openshift-marketplace/community-operators-bbw4f" Nov 26 00:09:38 crc kubenswrapper[4875]: E1126 00:09:38.289329 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:38.789314102 +0000 UTC m=+151.979289807 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:38 crc kubenswrapper[4875]: I1126 00:09:38.297974 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-wx746"] Nov 26 00:09:38 crc kubenswrapper[4875]: I1126 00:09:38.356874 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nvsjg\" (UniqueName: \"kubernetes.io/projected/3db2a450-bce1-422c-bbf4-8676b8662c2e-kube-api-access-nvsjg\") pod \"community-operators-bbw4f\" (UID: \"3db2a450-bce1-422c-bbf4-8676b8662c2e\") " pod="openshift-marketplace/community-operators-bbw4f" Nov 26 00:09:38 crc kubenswrapper[4875]: I1126 00:09:38.389583 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:38 crc kubenswrapper[4875]: E1126 00:09:38.389926 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:38.889909531 +0000 UTC m=+152.079885226 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:38 crc kubenswrapper[4875]: I1126 00:09:38.438955 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-bbw4f" Nov 26 00:09:38 crc kubenswrapper[4875]: E1126 00:09:38.465612 4875 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-pod4890311c_7d8d_4ad9_b05d_d8de035e07e2.slice/crio-conmon-1542e5ef87f62e9a92da223d6806dc954cb11d140cbee3ec1f54ad65e566cff7.scope\": RecentStats: unable to find data in memory cache]" Nov 26 00:09:38 crc kubenswrapper[4875]: I1126 00:09:38.493142 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:38 crc kubenswrapper[4875]: E1126 00:09:38.493511 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:38.993498136 +0000 UTC m=+152.183473831 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:38 crc kubenswrapper[4875]: I1126 00:09:38.595818 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:38 crc kubenswrapper[4875]: E1126 00:09:38.596552 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:39.096534447 +0000 UTC m=+152.286510142 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:38 crc kubenswrapper[4875]: I1126 00:09:38.698141 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:38 crc kubenswrapper[4875]: E1126 00:09:38.698544 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:39.198531081 +0000 UTC m=+152.388506776 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:38 crc kubenswrapper[4875]: I1126 00:09:38.721488 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rlkpg" Nov 26 00:09:38 crc kubenswrapper[4875]: I1126 00:09:38.763581 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-apiserver/apiserver-76f77b778f-8fcp4" Nov 26 00:09:38 crc kubenswrapper[4875]: I1126 00:09:38.763642 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-apiserver/apiserver-76f77b778f-8fcp4" Nov 26 00:09:38 crc kubenswrapper[4875]: I1126 00:09:38.799401 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:38 crc kubenswrapper[4875]: E1126 00:09:38.799786 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:39.299741356 +0000 UTC m=+152.489717051 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:38 crc kubenswrapper[4875]: I1126 00:09:38.799883 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:38 crc kubenswrapper[4875]: E1126 00:09:38.801293 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:39.301273065 +0000 UTC m=+152.491248760 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:38 crc kubenswrapper[4875]: I1126 00:09:38.819784 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-controller-manager/controller-manager-879f6c89f-5mx4v" Nov 26 00:09:38 crc kubenswrapper[4875]: I1126 00:09:38.883941 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-config-operator/openshift-config-operator-7777fb866f-zww4t" Nov 26 00:09:38 crc kubenswrapper[4875]: I1126 00:09:38.908206 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:38 crc kubenswrapper[4875]: E1126 00:09:38.909618 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:39.409597682 +0000 UTC m=+152.599573377 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:38 crc kubenswrapper[4875]: I1126 00:09:38.984090 4875 generic.go:334] "Generic (PLEG): container finished" podID="4890311c-7d8d-4ad9-b05d-d8de035e07e2" containerID="1542e5ef87f62e9a92da223d6806dc954cb11d140cbee3ec1f54ad65e566cff7" exitCode=0 Nov 26 00:09:38 crc kubenswrapper[4875]: I1126 00:09:38.984204 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" event={"ID":"4890311c-7d8d-4ad9-b05d-d8de035e07e2","Type":"ContainerDied","Data":"1542e5ef87f62e9a92da223d6806dc954cb11d140cbee3ec1f54ad65e566cff7"} Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.010157 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:39 crc kubenswrapper[4875]: E1126 00:09:39.011361 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:39.51134534 +0000 UTC m=+152.701321045 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.012339 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sgwj7" event={"ID":"b9a62657-d64a-4306-ba77-17417763f811","Type":"ContainerStarted","Data":"5a5fc9439ef61a2ad5eebf6abaa11ac6ae52c5626405d4d918eabd417ade2f38"} Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.012397 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sgwj7" event={"ID":"b9a62657-d64a-4306-ba77-17417763f811","Type":"ContainerStarted","Data":"449b7582df2049da8e10cec74de4b5341aab0b276acb1b63b98006ff035541ea"} Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.042499 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-b4kmf"] Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.052361 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wx746" event={"ID":"8bd0220e-3135-4767-b60b-e425535026fc","Type":"ContainerStarted","Data":"9db5d3d36b58426d7f0aa69ad9e96bc07cd4fb78f58c357ceff38b22fe1236b6"} Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.117912 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:39 crc kubenswrapper[4875]: E1126 00:09:39.118207 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:39.618192359 +0000 UTC m=+152.808168054 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.138582 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bbw4f"] Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.219045 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:39 crc kubenswrapper[4875]: E1126 00:09:39.219664 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:39.719648889 +0000 UTC m=+152.909624584 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.225428 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ingress/router-default-5444994796-bplfw" Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.227105 4875 patch_prober.go:28] interesting pod/router-default-5444994796-bplfw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 00:09:39 crc kubenswrapper[4875]: [-]has-synced failed: reason withheld Nov 26 00:09:39 crc kubenswrapper[4875]: [+]process-running ok Nov 26 00:09:39 crc kubenswrapper[4875]: healthz check failed Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.227146 4875 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bplfw" podUID="b226d68e-dbae-401a-88d4-ccc0cb1a51cf" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.260502 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-console/console-f9d7485db-l4gfb" Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.260536 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/console-f9d7485db-l4gfb" Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.266240 4875 patch_prober.go:28] interesting pod/console-f9d7485db-l4gfb container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.26:8443/health\": dial tcp 
10.217.0.26:8443: connect: connection refused" start-of-body= Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.266309 4875 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-l4gfb" podUID="4349f16a-1b36-45b5-aa93-f982bcaa781f" containerName="console" probeResult="failure" output="Get \"https://10.217.0.26:8443/health\": dial tcp 10.217.0.26:8443: connect: connection refused" Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.320900 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:39 crc kubenswrapper[4875]: E1126 00:09:39.321263 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:39.821238224 +0000 UTC m=+153.011213919 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.321309 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:39 crc kubenswrapper[4875]: E1126 00:09:39.321667 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:39.821658954 +0000 UTC m=+153.011634649 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.380792 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/packageserver-d55dfcdfc-t2hxd" Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.422323 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:39 crc kubenswrapper[4875]: E1126 00:09:39.422597 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:39.92255113 +0000 UTC m=+153.112526825 (durationBeforeRetry 500ms). Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.422842 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:39 crc kubenswrapper[4875]: E1126 00:09:39.423326 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:39.923302739 +0000 UTC m=+153.113278494 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.459094 4875 patch_prober.go:28] interesting pod/apiserver-76f77b778f-8fcp4 container/openshift-apiserver namespace/openshift-apiserver: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[+]ping ok Nov 26 00:09:39 crc kubenswrapper[4875]: [+]log ok Nov 26 00:09:39 crc kubenswrapper[4875]: [+]etcd ok Nov 26 00:09:39 crc kubenswrapper[4875]: [+]poststarthook/start-apiserver-admission-initializer ok Nov 26 00:09:39 crc kubenswrapper[4875]: [+]poststarthook/generic-apiserver-start-informers ok Nov 26 00:09:39 crc kubenswrapper[4875]: [+]poststarthook/max-in-flight-filter ok Nov 26 00:09:39 crc kubenswrapper[4875]: [+]poststarthook/storage-object-count-tracker-hook ok Nov 26 00:09:39 crc kubenswrapper[4875]: [+]poststarthook/image.openshift.io-apiserver-caches ok Nov 26 00:09:39 crc kubenswrapper[4875]: [-]poststarthook/authorization.openshift.io-bootstrapclusterroles failed: reason withheld Nov 26 00:09:39 crc kubenswrapper[4875]: [-]poststarthook/authorization.openshift.io-ensurenodebootstrap-sa failed: reason withheld Nov 26 00:09:39 crc kubenswrapper[4875]: [+]poststarthook/project.openshift.io-projectcache ok Nov 26 00:09:39 crc kubenswrapper[4875]: [+]poststarthook/project.openshift.io-projectauthorizationcache ok Nov 26 00:09:39 crc kubenswrapper[4875]: [+]poststarthook/openshift.io-startinformers ok Nov 26 00:09:39 crc kubenswrapper[4875]: [+]poststarthook/openshift.io-restmapperupdater ok Nov 26 00:09:39 crc kubenswrapper[4875]: [+]poststarthook/quota.openshift.io-clusterquotamapping ok Nov 26 00:09:39 crc kubenswrapper[4875]: livez check failed Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.459178 4875 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-apiserver/apiserver-76f77b778f-8fcp4" podUID="a1910aeb-3a24-4922-ada0-a47d871c0998" containerName="openshift-apiserver" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.464360 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-425kq"] Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.465686 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-425kq" Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.469788 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.490224 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-425kq"] Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.515852 4875 patch_prober.go:28] interesting pod/downloads-7954f5f757-7s69q container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.515902 4875 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-7s69q" podUID="b118a730-fb77-4632-83f3-9b94b4d52b2c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.516569 4875 patch_prober.go:28] interesting pod/downloads-7954f5f757-7s69q container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.516658 4875 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7s69q" podUID="b118a730-fb77-4632-83f3-9b94b4d52b2c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.523812 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:39 crc kubenswrapper[4875]: E1126 00:09:39.524890 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:40.024874983 +0000 UTC m=+153.214850678 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.625070 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e709507-2042-4993-a404-c5d8749b0da7-catalog-content\") pod \"redhat-marketplace-425kq\" (UID: \"9e709507-2042-4993-a404-c5d8749b0da7\") " pod="openshift-marketplace/redhat-marketplace-425kq" Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.625127 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e709507-2042-4993-a404-c5d8749b0da7-utilities\") pod \"redhat-marketplace-425kq\" (UID: \"9e709507-2042-4993-a404-c5d8749b0da7\") " pod="openshift-marketplace/redhat-marketplace-425kq" Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.625152 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-dhv88\" (UniqueName: \"kubernetes.io/projected/9e709507-2042-4993-a404-c5d8749b0da7-kube-api-access-dhv88\") pod \"redhat-marketplace-425kq\" (UID: \"9e709507-2042-4993-a404-c5d8749b0da7\") " pod="openshift-marketplace/redhat-marketplace-425kq" Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.625179 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:39 crc kubenswrapper[4875]: E1126 00:09:39.625627 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:40.125613694 +0000 UTC m=+153.315589389 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.642639 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/catalog-operator-68c6474976-62q8r" Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.733437 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.733744 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e709507-2042-4993-a404-c5d8749b0da7-catalog-content\") pod \"redhat-marketplace-425kq\" (UID: \"9e709507-2042-4993-a404-c5d8749b0da7\") " pod="openshift-marketplace/redhat-marketplace-425kq" Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.733795 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e709507-2042-4993-a404-c5d8749b0da7-utilities\") pod \"redhat-marketplace-425kq\" (UID: \"9e709507-2042-4993-a404-c5d8749b0da7\") " pod="openshift-marketplace/redhat-marketplace-425kq" Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.733826 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-dhv88\" (UniqueName: \"kubernetes.io/projected/9e709507-2042-4993-a404-c5d8749b0da7-kube-api-access-dhv88\") pod \"redhat-marketplace-425kq\" (UID: \"9e709507-2042-4993-a404-c5d8749b0da7\") " pod="openshift-marketplace/redhat-marketplace-425kq" Nov 26 00:09:39 crc kubenswrapper[4875]: E1126 00:09:39.733860 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:40.233826139 +0000 UTC m=+153.423801854 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.733925 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.734346 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e709507-2042-4993-a404-c5d8749b0da7-catalog-content\") pod \"redhat-marketplace-425kq\" (UID: \"9e709507-2042-4993-a404-c5d8749b0da7\") " pod="openshift-marketplace/redhat-marketplace-425kq" Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.736156 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e709507-2042-4993-a404-c5d8749b0da7-utilities\") pod \"redhat-marketplace-425kq\" (UID: \"9e709507-2042-4993-a404-c5d8749b0da7\") " pod="openshift-marketplace/redhat-marketplace-425kq" Nov 26 00:09:39 crc kubenswrapper[4875]: E1126 00:09:39.736402 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:40.236389025 +0000 UTC m=+153.426364720 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.750716 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-l4vms" Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.750829 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-l4vms" Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.756708 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-l4vms" Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.757622 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-dhv88\" (UniqueName: \"kubernetes.io/projected/9e709507-2042-4993-a404-c5d8749b0da7-kube-api-access-dhv88\") pod \"redhat-marketplace-425kq\" (UID: \"9e709507-2042-4993-a404-c5d8749b0da7\") " pod="openshift-marketplace/redhat-marketplace-425kq" Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.785875 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-425kq" Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.837215 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:39 crc kubenswrapper[4875]: E1126 00:09:39.837404 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:40.337370043 +0000 UTC m=+153.527345748 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.837841 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:39 crc kubenswrapper[4875]: E1126 00:09:39.838663 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:40.338642016 +0000 UTC m=+153.528617801 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.861681 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-rgtxh"] Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.862859 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rgtxh" Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.874661 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rgtxh"] Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.911881 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.938608 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:39 crc kubenswrapper[4875]: E1126 00:09:39.938798 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:40.438764492 +0000 UTC m=+153.628740187 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.938853 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/50ff6e6d-830c-40e0-9674-c3795df3eead-utilities\") pod \"redhat-marketplace-rgtxh\" (UID: \"50ff6e6d-830c-40e0-9674-c3795df3eead\") " pod="openshift-marketplace/redhat-marketplace-rgtxh" Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.938890 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.938911 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/50ff6e6d-830c-40e0-9674-c3795df3eead-catalog-content\") pod \"redhat-marketplace-rgtxh\" (UID: \"50ff6e6d-830c-40e0-9674-c3795df3eead\") " pod="openshift-marketplace/redhat-marketplace-rgtxh" Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.938933 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nkk46\" (UniqueName: \"kubernetes.io/projected/50ff6e6d-830c-40e0-9674-c3795df3eead-kube-api-access-nkk46\") pod \"redhat-marketplace-rgtxh\" (UID: \"50ff6e6d-830c-40e0-9674-c3795df3eead\") " pod="openshift-marketplace/redhat-marketplace-rgtxh" Nov 26 00:09:39 crc kubenswrapper[4875]: E1126 00:09:39.939193 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:40.439184433 +0000 UTC m=+153.629160118 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:39 crc kubenswrapper[4875]: I1126 00:09:39.973959 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-6ml5v" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.040059 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.040366 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/50ff6e6d-830c-40e0-9674-c3795df3eead-utilities\") pod \"redhat-marketplace-rgtxh\" (UID: \"50ff6e6d-830c-40e0-9674-c3795df3eead\") " pod="openshift-marketplace/redhat-marketplace-rgtxh" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.040522 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/50ff6e6d-830c-40e0-9674-c3795df3eead-catalog-content\") pod \"redhat-marketplace-rgtxh\" (UID: \"50ff6e6d-830c-40e0-9674-c3795df3eead\") " pod="openshift-marketplace/redhat-marketplace-rgtxh" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.040560 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nkk46\" (UniqueName: \"kubernetes.io/projected/50ff6e6d-830c-40e0-9674-c3795df3eead-kube-api-access-nkk46\") pod \"redhat-marketplace-rgtxh\" (UID: \"50ff6e6d-830c-40e0-9674-c3795df3eead\") " pod="openshift-marketplace/redhat-marketplace-rgtxh" Nov 26 00:09:40 crc kubenswrapper[4875]: E1126 00:09:40.041136 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:40.541112746 +0000 UTC m=+153.731088441 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.042234 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/50ff6e6d-830c-40e0-9674-c3795df3eead-utilities\") pod \"redhat-marketplace-rgtxh\" (UID: \"50ff6e6d-830c-40e0-9674-c3795df3eead\") " pod="openshift-marketplace/redhat-marketplace-rgtxh" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.042606 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/50ff6e6d-830c-40e0-9674-c3795df3eead-catalog-content\") pod \"redhat-marketplace-rgtxh\" (UID: \"50ff6e6d-830c-40e0-9674-c3795df3eead\") " pod="openshift-marketplace/redhat-marketplace-rgtxh" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.054299 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-425kq"] Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.062688 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-wlmp9" event={"ID":"f31ac9d7-e1d6-413f-8749-4f3f13be75c9","Type":"ContainerStarted","Data":"1f350131a93b5fcd44d1b08743c81fb7090a5c2a5fc6aef4a5a4b6c268962314"} Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.065309 4875 generic.go:334] "Generic (PLEG): container finished" podID="8bd0220e-3135-4767-b60b-e425535026fc" containerID="dc3b971584b5d912071714f4e3a25147d3e2896be4ef74b690222dea3024662b" exitCode=0 Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.065557 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wx746" event={"ID":"8bd0220e-3135-4767-b60b-e425535026fc","Type":"ContainerDied","Data":"dc3b971584b5d912071714f4e3a25147d3e2896be4ef74b690222dea3024662b"} Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.065627 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nkk46\" (UniqueName: \"kubernetes.io/projected/50ff6e6d-830c-40e0-9674-c3795df3eead-kube-api-access-nkk46\") pod \"redhat-marketplace-rgtxh\" (UID: \"50ff6e6d-830c-40e0-9674-c3795df3eead\") " pod="openshift-marketplace/redhat-marketplace-rgtxh" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.066650 4875 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.067316 4875 generic.go:334] "Generic (PLEG): container finished" podID="830b0136-66d9-47cb-9a99-f858deae4a28" containerID="12bfbd7927ee7b660cec73359c10e24959ce8224a10f55574c21ed1431fabfad" exitCode=0 Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.067405 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b4kmf" event={"ID":"830b0136-66d9-47cb-9a99-f858deae4a28","Type":"ContainerDied","Data":"12bfbd7927ee7b660cec73359c10e24959ce8224a10f55574c21ed1431fabfad"} Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.067448 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-b4kmf" event={"ID":"830b0136-66d9-47cb-9a99-f858deae4a28","Type":"ContainerStarted","Data":"dfebe8857f0ec0eda2ad99bc5456120bba0a289dac8f1480e0f09d8ad7fdd5c1"} Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.069836 4875 generic.go:334] "Generic (PLEG): container finished" podID="b9a62657-d64a-4306-ba77-17417763f811" containerID="5a5fc9439ef61a2ad5eebf6abaa11ac6ae52c5626405d4d918eabd417ade2f38" exitCode=0 Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.069906 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sgwj7" event={"ID":"b9a62657-d64a-4306-ba77-17417763f811","Type":"ContainerDied","Data":"5a5fc9439ef61a2ad5eebf6abaa11ac6ae52c5626405d4d918eabd417ade2f38"} Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.071476 4875 generic.go:334] "Generic (PLEG): container finished" podID="3db2a450-bce1-422c-bbf4-8676b8662c2e" containerID="d4c0ce3a4c619049bdc9a3249b258644da9a8e4412b6ab95bb149cc023d9a6b7" exitCode=0 Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.072327 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bbw4f" event={"ID":"3db2a450-bce1-422c-bbf4-8676b8662c2e","Type":"ContainerDied","Data":"d4c0ce3a4c619049bdc9a3249b258644da9a8e4412b6ab95bb149cc023d9a6b7"} Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.072366 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bbw4f" event={"ID":"3db2a450-bce1-422c-bbf4-8676b8662c2e","Type":"ContainerStarted","Data":"4da80b9f6a94d76454c6bf56a1a9ded6928fdf3e1e752eefcc6998199d344dc2"} Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.083091 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-oauth-apiserver/apiserver-7bbb656c7d-l4vms" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.103133 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.106183 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console-operator/console-operator-58897d9998-w9htq" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.143212 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:40 crc kubenswrapper[4875]: E1126 00:09:40.144276 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:40.64426212 +0000 UTC m=+153.834237815 (durationBeforeRetry 500ms). 
Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.187000 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rgtxh" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.199759 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/olm-operator-6b444d44fb-dbctw" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.233650 4875 patch_prober.go:28] interesting pod/router-default-5444994796-bplfw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 00:09:40 crc kubenswrapper[4875]: [-]has-synced failed: reason withheld Nov 26 00:09:40 crc kubenswrapper[4875]: [+]process-running ok Nov 26 00:09:40 crc kubenswrapper[4875]: healthz check failed Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.233710 4875 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bplfw" podUID="b226d68e-dbae-401a-88d4-ccc0cb1a51cf" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.247144 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:40 crc kubenswrapper[4875]: E1126 00:09:40.249369 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:40.749338653 +0000 UTC m=+153.939314338 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.318986 4875 plugin_watcher.go:194] "Adding socket path or updating timestamp to desired state cache" path="/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.353333 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:40 crc kubenswrapper[4875]: E1126 00:09:40.353960 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:40.853939644 +0000 UTC m=+154.043915399 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.431026 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.455339 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:40 crc kubenswrapper[4875]: E1126 00:09:40.456750 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:40.956690588 +0000 UTC m=+154.146666283 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.476329 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-jltf2"] Nov 26 00:09:40 crc kubenswrapper[4875]: E1126 00:09:40.476598 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4890311c-7d8d-4ad9-b05d-d8de035e07e2" containerName="pruner" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.476610 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="4890311c-7d8d-4ad9-b05d-d8de035e07e2" containerName="pruner" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.476718 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="4890311c-7d8d-4ad9-b05d-d8de035e07e2" containerName="pruner" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.477897 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jltf2" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.479923 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.481884 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jltf2"] Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.551717 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-rgtxh"] Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.564791 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4890311c-7d8d-4ad9-b05d-d8de035e07e2-kubelet-dir\") pod \"4890311c-7d8d-4ad9-b05d-d8de035e07e2\" (UID: \"4890311c-7d8d-4ad9-b05d-d8de035e07e2\") " Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.564911 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4890311c-7d8d-4ad9-b05d-d8de035e07e2-kube-api-access\") pod \"4890311c-7d8d-4ad9-b05d-d8de035e07e2\" (UID: \"4890311c-7d8d-4ad9-b05d-d8de035e07e2\") " Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.565076 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-hnlxn\" (UniqueName: \"kubernetes.io/projected/a2335e94-5d68-4b3e-a13b-37296965798d-kube-api-access-hnlxn\") pod \"redhat-operators-jltf2\" (UID: \"a2335e94-5d68-4b3e-a13b-37296965798d\") " pod="openshift-marketplace/redhat-operators-jltf2" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.565145 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2335e94-5d68-4b3e-a13b-37296965798d-catalog-content\") pod \"redhat-operators-jltf2\" (UID: \"a2335e94-5d68-4b3e-a13b-37296965798d\") " pod="openshift-marketplace/redhat-operators-jltf2" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.565192 4875 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2335e94-5d68-4b3e-a13b-37296965798d-utilities\") pod \"redhat-operators-jltf2\" (UID: \"a2335e94-5d68-4b3e-a13b-37296965798d\") " pod="openshift-marketplace/redhat-operators-jltf2" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.565220 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.565478 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4890311c-7d8d-4ad9-b05d-d8de035e07e2-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "4890311c-7d8d-4ad9-b05d-d8de035e07e2" (UID: "4890311c-7d8d-4ad9-b05d-d8de035e07e2"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:09:40 crc kubenswrapper[4875]: E1126 00:09:40.566839 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName: nodeName:}" failed. No retries permitted until 2025-11-26 00:09:41.066823701 +0000 UTC m=+154.256799396 (durationBeforeRetry 500ms). Error: MountVolume.MountDevice failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "image-registry-697d97f7c8-8pktj" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147") : kubernetes.io/csi: attacher.MountDevice failed to create newCsiDriverClient: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.573265 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4890311c-7d8d-4ad9-b05d-d8de035e07e2-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "4890311c-7d8d-4ad9-b05d-d8de035e07e2" (UID: "4890311c-7d8d-4ad9-b05d-d8de035e07e2"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.600967 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.601933 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.604460 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.605334 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.612114 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.666251 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.666501 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-hnlxn\" (UniqueName: \"kubernetes.io/projected/a2335e94-5d68-4b3e-a13b-37296965798d-kube-api-access-hnlxn\") pod \"redhat-operators-jltf2\" (UID: \"a2335e94-5d68-4b3e-a13b-37296965798d\") " pod="openshift-marketplace/redhat-operators-jltf2" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.666558 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2335e94-5d68-4b3e-a13b-37296965798d-catalog-content\") pod \"redhat-operators-jltf2\" (UID: \"a2335e94-5d68-4b3e-a13b-37296965798d\") " pod="openshift-marketplace/redhat-operators-jltf2" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.666602 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2335e94-5d68-4b3e-a13b-37296965798d-utilities\") pod \"redhat-operators-jltf2\" (UID: \"a2335e94-5d68-4b3e-a13b-37296965798d\") " pod="openshift-marketplace/redhat-operators-jltf2" Nov 26 00:09:40 crc kubenswrapper[4875]: E1126 00:09:40.666729 4875 nestedpendingoperations.go:348] Operation for "{volumeName:kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8 podName:8f668bae-612b-4b75-9490-919e737c6a3b nodeName:}" failed. No retries permitted until 2025-11-26 00:09:41.166711102 +0000 UTC m=+154.356686797 (durationBeforeRetry 500ms). 
Error: UnmountVolume.TearDown failed for volume "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (UniqueName: "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b") : kubernetes.io/csi: Unmounter.TearDownAt failed to get CSI client: driver name kubevirt.io.hostpath-provisioner not found in the list of registered CSI drivers Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.666753 4875 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4890311c-7d8d-4ad9-b05d-d8de035e07e2-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.666763 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4890311c-7d8d-4ad9-b05d-d8de035e07e2-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.667114 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2335e94-5d68-4b3e-a13b-37296965798d-catalog-content\") pod \"redhat-operators-jltf2\" (UID: \"a2335e94-5d68-4b3e-a13b-37296965798d\") " pod="openshift-marketplace/redhat-operators-jltf2" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.667565 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2335e94-5d68-4b3e-a13b-37296965798d-utilities\") pod \"redhat-operators-jltf2\" (UID: \"a2335e94-5d68-4b3e-a13b-37296965798d\") " pod="openshift-marketplace/redhat-operators-jltf2" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.685434 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-hnlxn\" (UniqueName: \"kubernetes.io/projected/a2335e94-5d68-4b3e-a13b-37296965798d-kube-api-access-hnlxn\") pod \"redhat-operators-jltf2\" (UID: \"a2335e94-5d68-4b3e-a13b-37296965798d\") " pod="openshift-marketplace/redhat-operators-jltf2" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.753032 4875 reconciler.go:161] "OperationExecutor.RegisterPlugin started" plugin={"SocketPath":"/var/lib/kubelet/plugins_registry/kubevirt.io.hostpath-provisioner-reg.sock","Timestamp":"2025-11-26T00:09:40.319478178Z","Handler":null,"Name":""} Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.756880 4875 csi_plugin.go:100] kubernetes.io/csi: Trying to validate a new CSI Driver with name: kubevirt.io.hostpath-provisioner endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock versions: 1.0.0 Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.756919 4875 csi_plugin.go:113] kubernetes.io/csi: Register new plugin with name: kubevirt.io.hostpath-provisioner at endpoint: /var/lib/kubelet/plugins/csi-hostpath/csi.sock Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.768171 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d15feed8-dd77-4a82-ab54-3e64f49db01e-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"d15feed8-dd77-4a82-ab54-3e64f49db01e\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.768271 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: 
\"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.768344 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d15feed8-dd77-4a82-ab54-3e64f49db01e-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"d15feed8-dd77-4a82-ab54-3e64f49db01e\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.772750 4875 csi_attacher.go:380] kubernetes.io/csi: attacher.MountDevice STAGE_UNSTAGE_VOLUME capability not set. Skipping MountDevice... Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.772832 4875 operation_generator.go:580] "MountVolume.MountDevice succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") device mount path \"/var/lib/kubelet/plugins/kubernetes.io/csi/kubevirt.io.hostpath-provisioner/1f4776af88835e41c12b831b4c9fed40233456d14189815a54dbe7f892fc1983/globalmount\"" pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.804844 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jltf2" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.811830 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-697d97f7c8-8pktj\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.859472 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-wdpnp"] Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.861445 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wdpnp" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.873199 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"8f668bae-612b-4b75-9490-919e737c6a3b\" (UID: \"8f668bae-612b-4b75-9490-919e737c6a3b\") " Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.873803 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d15feed8-dd77-4a82-ab54-3e64f49db01e-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"d15feed8-dd77-4a82-ab54-3e64f49db01e\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.873912 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d15feed8-dd77-4a82-ab54-3e64f49db01e-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"d15feed8-dd77-4a82-ab54-3e64f49db01e\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.873973 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d15feed8-dd77-4a82-ab54-3e64f49db01e-kubelet-dir\") pod \"revision-pruner-8-crc\" (UID: \"d15feed8-dd77-4a82-ab54-3e64f49db01e\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.882447 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wdpnp"] Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.885830 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8") pod "8f668bae-612b-4b75-9490-919e737c6a3b" (UID: "8f668bae-612b-4b75-9490-919e737c6a3b"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". 
PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.900058 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d15feed8-dd77-4a82-ab54-3e64f49db01e-kube-api-access\") pod \"revision-pruner-8-crc\" (UID: \"d15feed8-dd77-4a82-ab54-3e64f49db01e\") " pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.977043 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/db812bf0-1b40-4520-8c30-0267012a87dc-catalog-content\") pod \"redhat-operators-wdpnp\" (UID: \"db812bf0-1b40-4520-8c30-0267012a87dc\") " pod="openshift-marketplace/redhat-operators-wdpnp" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.977104 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/db812bf0-1b40-4520-8c30-0267012a87dc-utilities\") pod \"redhat-operators-wdpnp\" (UID: \"db812bf0-1b40-4520-8c30-0267012a87dc\") " pod="openshift-marketplace/redhat-operators-wdpnp" Nov 26 00:09:40 crc kubenswrapper[4875]: I1126 00:09:40.977187 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b5pwr\" (UniqueName: \"kubernetes.io/projected/db812bf0-1b40-4520-8c30-0267012a87dc-kube-api-access-b5pwr\") pod \"redhat-operators-wdpnp\" (UID: \"db812bf0-1b40-4520-8c30-0267012a87dc\") " pod="openshift-marketplace/redhat-operators-wdpnp" Nov 26 00:09:41 crc kubenswrapper[4875]: I1126 00:09:41.037259 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:41 crc kubenswrapper[4875]: I1126 00:09:41.048341 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 00:09:41 crc kubenswrapper[4875]: I1126 00:09:41.055427 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-jltf2"] Nov 26 00:09:41 crc kubenswrapper[4875]: W1126 00:09:41.075132 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda2335e94_5d68_4b3e_a13b_37296965798d.slice/crio-c7a27988fe63e872c3c0fbe035ede00b638f68089210fe42c6d725618886b7c8 WatchSource:0}: Error finding container c7a27988fe63e872c3c0fbe035ede00b638f68089210fe42c6d725618886b7c8: Status 404 returned error can't find the container with id c7a27988fe63e872c3c0fbe035ede00b638f68089210fe42c6d725618886b7c8 Nov 26 00:09:41 crc kubenswrapper[4875]: I1126 00:09:41.078014 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b5pwr\" (UniqueName: \"kubernetes.io/projected/db812bf0-1b40-4520-8c30-0267012a87dc-kube-api-access-b5pwr\") pod \"redhat-operators-wdpnp\" (UID: \"db812bf0-1b40-4520-8c30-0267012a87dc\") " pod="openshift-marketplace/redhat-operators-wdpnp" Nov 26 00:09:41 crc kubenswrapper[4875]: I1126 00:09:41.078077 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/db812bf0-1b40-4520-8c30-0267012a87dc-catalog-content\") pod \"redhat-operators-wdpnp\" (UID: \"db812bf0-1b40-4520-8c30-0267012a87dc\") " pod="openshift-marketplace/redhat-operators-wdpnp" Nov 26 00:09:41 crc kubenswrapper[4875]: I1126 00:09:41.078108 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/db812bf0-1b40-4520-8c30-0267012a87dc-utilities\") pod \"redhat-operators-wdpnp\" (UID: \"db812bf0-1b40-4520-8c30-0267012a87dc\") " pod="openshift-marketplace/redhat-operators-wdpnp" Nov 26 00:09:41 crc kubenswrapper[4875]: I1126 00:09:41.078762 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/db812bf0-1b40-4520-8c30-0267012a87dc-catalog-content\") pod \"redhat-operators-wdpnp\" (UID: \"db812bf0-1b40-4520-8c30-0267012a87dc\") " pod="openshift-marketplace/redhat-operators-wdpnp" Nov 26 00:09:41 crc kubenswrapper[4875]: I1126 00:09:41.078809 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/db812bf0-1b40-4520-8c30-0267012a87dc-utilities\") pod \"redhat-operators-wdpnp\" (UID: \"db812bf0-1b40-4520-8c30-0267012a87dc\") " pod="openshift-marketplace/redhat-operators-wdpnp" Nov 26 00:09:41 crc kubenswrapper[4875]: I1126 00:09:41.080742 4875 generic.go:334] "Generic (PLEG): container finished" podID="601c8445-3908-4677-9df9-422f8e97d928" containerID="bcabc62376cc6ca6ddb76a0e359de381ea0bfbe911c20bf1081c563a5bb264b9" exitCode=0 Nov 26 00:09:41 crc kubenswrapper[4875]: I1126 00:09:41.080816 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401920-pfkvn" event={"ID":"601c8445-3908-4677-9df9-422f8e97d928","Type":"ContainerDied","Data":"bcabc62376cc6ca6ddb76a0e359de381ea0bfbe911c20bf1081c563a5bb264b9"} Nov 26 00:09:41 crc kubenswrapper[4875]: I1126 00:09:41.088466 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/revision-pruner-9-crc" 
event={"ID":"4890311c-7d8d-4ad9-b05d-d8de035e07e2","Type":"ContainerDied","Data":"59a775b65b5fdc05bbced2610af9812c6c78ef40f3ae18a4c7fdc15f6dff46cc"} Nov 26 00:09:41 crc kubenswrapper[4875]: I1126 00:09:41.088509 4875 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="59a775b65b5fdc05bbced2610af9812c6c78ef40f3ae18a4c7fdc15f6dff46cc" Nov 26 00:09:41 crc kubenswrapper[4875]: I1126 00:09:41.088594 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-controller-manager/revision-pruner-9-crc" Nov 26 00:09:41 crc kubenswrapper[4875]: I1126 00:09:41.090848 4875 generic.go:334] "Generic (PLEG): container finished" podID="9e709507-2042-4993-a404-c5d8749b0da7" containerID="f5cba063a6fa84676f9100a044177cc1fcebcb24f8a8383a7c5e1ebde1b0cb47" exitCode=0 Nov 26 00:09:41 crc kubenswrapper[4875]: I1126 00:09:41.090900 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-425kq" event={"ID":"9e709507-2042-4993-a404-c5d8749b0da7","Type":"ContainerDied","Data":"f5cba063a6fa84676f9100a044177cc1fcebcb24f8a8383a7c5e1ebde1b0cb47"} Nov 26 00:09:41 crc kubenswrapper[4875]: I1126 00:09:41.090950 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-425kq" event={"ID":"9e709507-2042-4993-a404-c5d8749b0da7","Type":"ContainerStarted","Data":"97833869429a3fe1c5899f93fe3690a2aa5ae7dfa6ec9acc7bf72df059673af2"} Nov 26 00:09:41 crc kubenswrapper[4875]: I1126 00:09:41.092987 4875 generic.go:334] "Generic (PLEG): container finished" podID="50ff6e6d-830c-40e0-9674-c3795df3eead" containerID="cbb25ab15effa2ab791d946e8c4ac53b8d56715dcaf3c72ea6fb04d3f88674c9" exitCode=0 Nov 26 00:09:41 crc kubenswrapper[4875]: I1126 00:09:41.093156 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rgtxh" event={"ID":"50ff6e6d-830c-40e0-9674-c3795df3eead","Type":"ContainerDied","Data":"cbb25ab15effa2ab791d946e8c4ac53b8d56715dcaf3c72ea6fb04d3f88674c9"} Nov 26 00:09:41 crc kubenswrapper[4875]: I1126 00:09:41.093192 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rgtxh" event={"ID":"50ff6e6d-830c-40e0-9674-c3795df3eead","Type":"ContainerStarted","Data":"fe0cbbd3708657c77e92711b72b02a412e88ef3fde2d60b563ad57865e4ce539"} Nov 26 00:09:41 crc kubenswrapper[4875]: I1126 00:09:41.099310 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b5pwr\" (UniqueName: \"kubernetes.io/projected/db812bf0-1b40-4520-8c30-0267012a87dc-kube-api-access-b5pwr\") pod \"redhat-operators-wdpnp\" (UID: \"db812bf0-1b40-4520-8c30-0267012a87dc\") " pod="openshift-marketplace/redhat-operators-wdpnp" Nov 26 00:09:41 crc kubenswrapper[4875]: I1126 00:09:41.112723 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-wlmp9" event={"ID":"f31ac9d7-e1d6-413f-8749-4f3f13be75c9","Type":"ContainerStarted","Data":"ac102d72c6b6a7f2a9603ffbaac1f322bb7a9dc8b0ecd30ad9b96d4ae2df4c59"} Nov 26 00:09:41 crc kubenswrapper[4875]: I1126 00:09:41.113168 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="hostpath-provisioner/csi-hostpathplugin-wlmp9" event={"ID":"f31ac9d7-e1d6-413f-8749-4f3f13be75c9","Type":"ContainerStarted","Data":"c411b56b576590fe9d1f4278e70b594557883faefc0441be13b4a0e0d62cb88e"} Nov 26 00:09:41 crc kubenswrapper[4875]: I1126 00:09:41.159323 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="hostpath-provisioner/csi-hostpathplugin-wlmp9" podStartSLOduration=15.159304966 podStartE2EDuration="15.159304966s" podCreationTimestamp="2025-11-26 00:09:26 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:09:41.157160681 +0000 UTC m=+154.347136386" watchObservedRunningTime="2025-11-26 00:09:41.159304966 +0000 UTC m=+154.349280661" Nov 26 00:09:41 crc kubenswrapper[4875]: I1126 00:09:41.206685 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-wdpnp" Nov 26 00:09:41 crc kubenswrapper[4875]: I1126 00:09:41.227061 4875 patch_prober.go:28] interesting pod/router-default-5444994796-bplfw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 00:09:41 crc kubenswrapper[4875]: [-]has-synced failed: reason withheld Nov 26 00:09:41 crc kubenswrapper[4875]: [+]process-running ok Nov 26 00:09:41 crc kubenswrapper[4875]: healthz check failed Nov 26 00:09:41 crc kubenswrapper[4875]: I1126 00:09:41.227135 4875 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bplfw" podUID="b226d68e-dbae-401a-88d4-ccc0cb1a51cf" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 00:09:41 crc kubenswrapper[4875]: I1126 00:09:41.323811 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-8pktj"] Nov 26 00:09:41 crc kubenswrapper[4875]: W1126 00:09:41.340344 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod35ee6c04_3028_4b06_ac2b_6a26a2f2e147.slice/crio-2bbe32ed8153918f703d154ed3cf2248360d1f0df5390bc25c0a9c50b1c18f5b WatchSource:0}: Error finding container 2bbe32ed8153918f703d154ed3cf2248360d1f0df5390bc25c0a9c50b1c18f5b: Status 404 returned error can't find the container with id 2bbe32ed8153918f703d154ed3cf2248360d1f0df5390bc25c0a9c50b1c18f5b Nov 26 00:09:41 crc kubenswrapper[4875]: I1126 00:09:41.354043 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-8-crc"] Nov 26 00:09:41 crc kubenswrapper[4875]: W1126 00:09:41.368001 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-podd15feed8_dd77_4a82_ab54_3e64f49db01e.slice/crio-eec38ba1f6f3ee4089df0b288c6b1fff4c666cd0e9b46b5db5c15212d4e5e0d4 WatchSource:0}: Error finding container eec38ba1f6f3ee4089df0b288c6b1fff4c666cd0e9b46b5db5c15212d4e5e0d4: Status 404 returned error can't find the container with id eec38ba1f6f3ee4089df0b288c6b1fff4c666cd0e9b46b5db5c15212d4e5e0d4 Nov 26 00:09:41 crc kubenswrapper[4875]: I1126 00:09:41.423132 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-wdpnp"] Nov 26 00:09:41 crc kubenswrapper[4875]: I1126 00:09:41.537686 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8f668bae-612b-4b75-9490-919e737c6a3b" path="/var/lib/kubelet/pods/8f668bae-612b-4b75-9490-919e737c6a3b/volumes" Nov 26 00:09:42 crc kubenswrapper[4875]: I1126 00:09:42.109026 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-dns/dns-default-j5qr7" Nov 26 00:09:42 crc kubenswrapper[4875]: I1126 00:09:42.120377 4875 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" event={"ID":"35ee6c04-3028-4b06-ac2b-6a26a2f2e147","Type":"ContainerStarted","Data":"8b4779696827ab0a776ceda15157ee83bc8b91ae42560ffa548e3cbfc8c2e5e3"} Nov 26 00:09:42 crc kubenswrapper[4875]: I1126 00:09:42.120434 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" event={"ID":"35ee6c04-3028-4b06-ac2b-6a26a2f2e147","Type":"ContainerStarted","Data":"2bbe32ed8153918f703d154ed3cf2248360d1f0df5390bc25c0a9c50b1c18f5b"} Nov 26 00:09:42 crc kubenswrapper[4875]: I1126 00:09:42.120949 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:09:42 crc kubenswrapper[4875]: I1126 00:09:42.124530 4875 generic.go:334] "Generic (PLEG): container finished" podID="db812bf0-1b40-4520-8c30-0267012a87dc" containerID="c6b80953796083a5546336ad6b0425003c101cef68297c13041376daeb7b048b" exitCode=0 Nov 26 00:09:42 crc kubenswrapper[4875]: I1126 00:09:42.124672 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wdpnp" event={"ID":"db812bf0-1b40-4520-8c30-0267012a87dc","Type":"ContainerDied","Data":"c6b80953796083a5546336ad6b0425003c101cef68297c13041376daeb7b048b"} Nov 26 00:09:42 crc kubenswrapper[4875]: I1126 00:09:42.124701 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wdpnp" event={"ID":"db812bf0-1b40-4520-8c30-0267012a87dc","Type":"ContainerStarted","Data":"f89418f126fb007923d5152cad7064bdad1be0757022745695dc20229c821a93"} Nov 26 00:09:42 crc kubenswrapper[4875]: I1126 00:09:42.149578 4875 generic.go:334] "Generic (PLEG): container finished" podID="a2335e94-5d68-4b3e-a13b-37296965798d" containerID="6e7814cb6abd3015a2987f861f8be5a17fb87399f03db55010bd76a79e289c79" exitCode=0 Nov 26 00:09:42 crc kubenswrapper[4875]: I1126 00:09:42.150114 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jltf2" event={"ID":"a2335e94-5d68-4b3e-a13b-37296965798d","Type":"ContainerDied","Data":"6e7814cb6abd3015a2987f861f8be5a17fb87399f03db55010bd76a79e289c79"} Nov 26 00:09:42 crc kubenswrapper[4875]: I1126 00:09:42.150140 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jltf2" event={"ID":"a2335e94-5d68-4b3e-a13b-37296965798d","Type":"ContainerStarted","Data":"c7a27988fe63e872c3c0fbe035ede00b638f68089210fe42c6d725618886b7c8"} Nov 26 00:09:42 crc kubenswrapper[4875]: I1126 00:09:42.158275 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"d15feed8-dd77-4a82-ab54-3e64f49db01e","Type":"ContainerStarted","Data":"5008fc45c3ee2a246aecc82e2732decf9ca709e0425c28d7370abff017221e7e"} Nov 26 00:09:42 crc kubenswrapper[4875]: I1126 00:09:42.158314 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"d15feed8-dd77-4a82-ab54-3e64f49db01e","Type":"ContainerStarted","Data":"eec38ba1f6f3ee4089df0b288c6b1fff4c666cd0e9b46b5db5c15212d4e5e0d4"} Nov 26 00:09:42 crc kubenswrapper[4875]: I1126 00:09:42.168486 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" podStartSLOduration=134.168469441 podStartE2EDuration="2m14.168469441s" podCreationTimestamp="2025-11-26 00:07:28 +0000 UTC" 
firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:09:42.165122215 +0000 UTC m=+155.355097900" watchObservedRunningTime="2025-11-26 00:09:42.168469441 +0000 UTC m=+155.358445146" Nov 26 00:09:42 crc kubenswrapper[4875]: I1126 00:09:42.220829 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/revision-pruner-8-crc" podStartSLOduration=2.2208074780000002 podStartE2EDuration="2.220807478s" podCreationTimestamp="2025-11-26 00:09:40 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:09:42.219366311 +0000 UTC m=+155.409342006" watchObservedRunningTime="2025-11-26 00:09:42.220807478 +0000 UTC m=+155.410783173" Nov 26 00:09:42 crc kubenswrapper[4875]: I1126 00:09:42.230696 4875 patch_prober.go:28] interesting pod/router-default-5444994796-bplfw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 00:09:42 crc kubenswrapper[4875]: [-]has-synced failed: reason withheld Nov 26 00:09:42 crc kubenswrapper[4875]: [+]process-running ok Nov 26 00:09:42 crc kubenswrapper[4875]: healthz check failed Nov 26 00:09:42 crc kubenswrapper[4875]: I1126 00:09:42.230774 4875 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bplfw" podUID="b226d68e-dbae-401a-88d4-ccc0cb1a51cf" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 00:09:42 crc kubenswrapper[4875]: I1126 00:09:42.479476 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401920-pfkvn" Nov 26 00:09:42 crc kubenswrapper[4875]: I1126 00:09:42.606337 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/601c8445-3908-4677-9df9-422f8e97d928-config-volume\") pod \"601c8445-3908-4677-9df9-422f8e97d928\" (UID: \"601c8445-3908-4677-9df9-422f8e97d928\") " Nov 26 00:09:42 crc kubenswrapper[4875]: I1126 00:09:42.606463 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-n8gzv\" (UniqueName: \"kubernetes.io/projected/601c8445-3908-4677-9df9-422f8e97d928-kube-api-access-n8gzv\") pod \"601c8445-3908-4677-9df9-422f8e97d928\" (UID: \"601c8445-3908-4677-9df9-422f8e97d928\") " Nov 26 00:09:42 crc kubenswrapper[4875]: I1126 00:09:42.606503 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/601c8445-3908-4677-9df9-422f8e97d928-secret-volume\") pod \"601c8445-3908-4677-9df9-422f8e97d928\" (UID: \"601c8445-3908-4677-9df9-422f8e97d928\") " Nov 26 00:09:42 crc kubenswrapper[4875]: I1126 00:09:42.608996 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/601c8445-3908-4677-9df9-422f8e97d928-config-volume" (OuterVolumeSpecName: "config-volume") pod "601c8445-3908-4677-9df9-422f8e97d928" (UID: "601c8445-3908-4677-9df9-422f8e97d928"). InnerVolumeSpecName "config-volume". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:09:42 crc kubenswrapper[4875]: I1126 00:09:42.629254 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/601c8445-3908-4677-9df9-422f8e97d928-kube-api-access-n8gzv" (OuterVolumeSpecName: "kube-api-access-n8gzv") pod "601c8445-3908-4677-9df9-422f8e97d928" (UID: "601c8445-3908-4677-9df9-422f8e97d928"). InnerVolumeSpecName "kube-api-access-n8gzv". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:09:42 crc kubenswrapper[4875]: I1126 00:09:42.637262 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/601c8445-3908-4677-9df9-422f8e97d928-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "601c8445-3908-4677-9df9-422f8e97d928" (UID: "601c8445-3908-4677-9df9-422f8e97d928"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:09:42 crc kubenswrapper[4875]: I1126 00:09:42.708502 4875 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/601c8445-3908-4677-9df9-422f8e97d928-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 00:09:42 crc kubenswrapper[4875]: I1126 00:09:42.708537 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-n8gzv\" (UniqueName: \"kubernetes.io/projected/601c8445-3908-4677-9df9-422f8e97d928-kube-api-access-n8gzv\") on node \"crc\" DevicePath \"\"" Nov 26 00:09:42 crc kubenswrapper[4875]: I1126 00:09:42.708549 4875 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/601c8445-3908-4677-9df9-422f8e97d928-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 00:09:43 crc kubenswrapper[4875]: I1126 00:09:43.191770 4875 generic.go:334] "Generic (PLEG): container finished" podID="d15feed8-dd77-4a82-ab54-3e64f49db01e" containerID="5008fc45c3ee2a246aecc82e2732decf9ca709e0425c28d7370abff017221e7e" exitCode=0 Nov 26 00:09:43 crc kubenswrapper[4875]: I1126 00:09:43.192782 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"d15feed8-dd77-4a82-ab54-3e64f49db01e","Type":"ContainerDied","Data":"5008fc45c3ee2a246aecc82e2732decf9ca709e0425c28d7370abff017221e7e"} Nov 26 00:09:43 crc kubenswrapper[4875]: I1126 00:09:43.202929 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401920-pfkvn" Nov 26 00:09:43 crc kubenswrapper[4875]: I1126 00:09:43.203299 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401920-pfkvn" event={"ID":"601c8445-3908-4677-9df9-422f8e97d928","Type":"ContainerDied","Data":"fab8ed9cb967a8c3e469bd34e98ed5170acf90287640122afef9505b4bddb2e8"} Nov 26 00:09:43 crc kubenswrapper[4875]: I1126 00:09:43.203495 4875 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fab8ed9cb967a8c3e469bd34e98ed5170acf90287640122afef9505b4bddb2e8" Nov 26 00:09:43 crc kubenswrapper[4875]: I1126 00:09:43.225449 4875 patch_prober.go:28] interesting pod/router-default-5444994796-bplfw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 00:09:43 crc kubenswrapper[4875]: [-]has-synced failed: reason withheld Nov 26 00:09:43 crc kubenswrapper[4875]: [+]process-running ok Nov 26 00:09:43 crc kubenswrapper[4875]: healthz check failed Nov 26 00:09:43 crc kubenswrapper[4875]: I1126 00:09:43.225510 4875 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bplfw" podUID="b226d68e-dbae-401a-88d4-ccc0cb1a51cf" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 00:09:43 crc kubenswrapper[4875]: I1126 00:09:43.770514 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-apiserver/apiserver-76f77b778f-8fcp4" Nov 26 00:09:43 crc kubenswrapper[4875]: I1126 00:09:43.775039 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-apiserver/apiserver-76f77b778f-8fcp4" Nov 26 00:09:44 crc kubenswrapper[4875]: I1126 00:09:44.224614 4875 patch_prober.go:28] interesting pod/router-default-5444994796-bplfw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 00:09:44 crc kubenswrapper[4875]: [-]has-synced failed: reason withheld Nov 26 00:09:44 crc kubenswrapper[4875]: [+]process-running ok Nov 26 00:09:44 crc kubenswrapper[4875]: healthz check failed Nov 26 00:09:44 crc kubenswrapper[4875]: I1126 00:09:44.224962 4875 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bplfw" podUID="b226d68e-dbae-401a-88d4-ccc0cb1a51cf" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 00:09:44 crc kubenswrapper[4875]: I1126 00:09:44.465289 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 00:09:44 crc kubenswrapper[4875]: I1126 00:09:44.576302 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d15feed8-dd77-4a82-ab54-3e64f49db01e-kube-api-access\") pod \"d15feed8-dd77-4a82-ab54-3e64f49db01e\" (UID: \"d15feed8-dd77-4a82-ab54-3e64f49db01e\") " Nov 26 00:09:44 crc kubenswrapper[4875]: I1126 00:09:44.576399 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d15feed8-dd77-4a82-ab54-3e64f49db01e-kubelet-dir\") pod \"d15feed8-dd77-4a82-ab54-3e64f49db01e\" (UID: \"d15feed8-dd77-4a82-ab54-3e64f49db01e\") " Nov 26 00:09:44 crc kubenswrapper[4875]: I1126 00:09:44.576489 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/d15feed8-dd77-4a82-ab54-3e64f49db01e-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "d15feed8-dd77-4a82-ab54-3e64f49db01e" (UID: "d15feed8-dd77-4a82-ab54-3e64f49db01e"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:09:44 crc kubenswrapper[4875]: I1126 00:09:44.576715 4875 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/d15feed8-dd77-4a82-ab54-3e64f49db01e-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 26 00:09:44 crc kubenswrapper[4875]: I1126 00:09:44.580018 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d15feed8-dd77-4a82-ab54-3e64f49db01e-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "d15feed8-dd77-4a82-ab54-3e64f49db01e" (UID: "d15feed8-dd77-4a82-ab54-3e64f49db01e"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:09:44 crc kubenswrapper[4875]: I1126 00:09:44.677940 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/d15feed8-dd77-4a82-ab54-3e64f49db01e-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 00:09:45 crc kubenswrapper[4875]: I1126 00:09:45.220502 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-8-crc" event={"ID":"d15feed8-dd77-4a82-ab54-3e64f49db01e","Type":"ContainerDied","Data":"eec38ba1f6f3ee4089df0b288c6b1fff4c666cd0e9b46b5db5c15212d4e5e0d4"} Nov 26 00:09:45 crc kubenswrapper[4875]: I1126 00:09:45.220799 4875 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="eec38ba1f6f3ee4089df0b288c6b1fff4c666cd0e9b46b5db5c15212d4e5e0d4" Nov 26 00:09:45 crc kubenswrapper[4875]: I1126 00:09:45.220570 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-8-crc" Nov 26 00:09:45 crc kubenswrapper[4875]: I1126 00:09:45.226355 4875 patch_prober.go:28] interesting pod/router-default-5444994796-bplfw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 00:09:45 crc kubenswrapper[4875]: [-]has-synced failed: reason withheld Nov 26 00:09:45 crc kubenswrapper[4875]: [+]process-running ok Nov 26 00:09:45 crc kubenswrapper[4875]: healthz check failed Nov 26 00:09:45 crc kubenswrapper[4875]: I1126 00:09:45.226400 4875 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bplfw" podUID="b226d68e-dbae-401a-88d4-ccc0cb1a51cf" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 00:09:46 crc kubenswrapper[4875]: I1126 00:09:46.230615 4875 patch_prober.go:28] interesting pod/router-default-5444994796-bplfw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 00:09:46 crc kubenswrapper[4875]: [-]has-synced failed: reason withheld Nov 26 00:09:46 crc kubenswrapper[4875]: [+]process-running ok Nov 26 00:09:46 crc kubenswrapper[4875]: healthz check failed Nov 26 00:09:46 crc kubenswrapper[4875]: I1126 00:09:46.230704 4875 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bplfw" podUID="b226d68e-dbae-401a-88d4-ccc0cb1a51cf" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 00:09:47 crc kubenswrapper[4875]: I1126 00:09:47.225309 4875 patch_prober.go:28] interesting pod/router-default-5444994796-bplfw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 00:09:47 crc kubenswrapper[4875]: [-]has-synced failed: reason withheld Nov 26 00:09:47 crc kubenswrapper[4875]: [+]process-running ok Nov 26 00:09:47 crc kubenswrapper[4875]: healthz check failed Nov 26 00:09:47 crc kubenswrapper[4875]: I1126 00:09:47.225367 4875 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bplfw" podUID="b226d68e-dbae-401a-88d4-ccc0cb1a51cf" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 00:09:48 crc kubenswrapper[4875]: I1126 00:09:48.224947 4875 patch_prober.go:28] interesting pod/router-default-5444994796-bplfw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 00:09:48 crc kubenswrapper[4875]: [-]has-synced failed: reason withheld Nov 26 00:09:48 crc kubenswrapper[4875]: [+]process-running ok Nov 26 00:09:48 crc kubenswrapper[4875]: healthz check failed Nov 26 00:09:48 crc kubenswrapper[4875]: I1126 00:09:48.225221 4875 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bplfw" podUID="b226d68e-dbae-401a-88d4-ccc0cb1a51cf" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 00:09:49 crc kubenswrapper[4875]: I1126 00:09:49.225957 4875 patch_prober.go:28] interesting pod/router-default-5444994796-bplfw container/router 
namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 00:09:49 crc kubenswrapper[4875]: [-]has-synced failed: reason withheld Nov 26 00:09:49 crc kubenswrapper[4875]: [+]process-running ok Nov 26 00:09:49 crc kubenswrapper[4875]: healthz check failed Nov 26 00:09:49 crc kubenswrapper[4875]: I1126 00:09:49.226025 4875 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bplfw" podUID="b226d68e-dbae-401a-88d4-ccc0cb1a51cf" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 00:09:49 crc kubenswrapper[4875]: I1126 00:09:49.260056 4875 patch_prober.go:28] interesting pod/console-f9d7485db-l4gfb container/console namespace/openshift-console: Startup probe status=failure output="Get \"https://10.217.0.26:8443/health\": dial tcp 10.217.0.26:8443: connect: connection refused" start-of-body= Nov 26 00:09:49 crc kubenswrapper[4875]: I1126 00:09:49.260128 4875 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-console/console-f9d7485db-l4gfb" podUID="4349f16a-1b36-45b5-aa93-f982bcaa781f" containerName="console" probeResult="failure" output="Get \"https://10.217.0.26:8443/health\": dial tcp 10.217.0.26:8443: connect: connection refused" Nov 26 00:09:49 crc kubenswrapper[4875]: I1126 00:09:49.515790 4875 patch_prober.go:28] interesting pod/downloads-7954f5f757-7s69q container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Nov 26 00:09:49 crc kubenswrapper[4875]: I1126 00:09:49.515850 4875 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-7s69q" podUID="b118a730-fb77-4632-83f3-9b94b4d52b2c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Nov 26 00:09:49 crc kubenswrapper[4875]: I1126 00:09:49.515785 4875 patch_prober.go:28] interesting pod/downloads-7954f5f757-7s69q container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Nov 26 00:09:49 crc kubenswrapper[4875]: I1126 00:09:49.515911 4875 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7s69q" podUID="b118a730-fb77-4632-83f3-9b94b4d52b2c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Nov 26 00:09:49 crc kubenswrapper[4875]: I1126 00:09:49.567487 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3b640f09-f96d-4197-97be-f50e211e484d-metrics-certs\") pod \"network-metrics-daemon-jhkp7\" (UID: \"3b640f09-f96d-4197-97be-f50e211e484d\") " pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:09:49 crc kubenswrapper[4875]: I1126 00:09:49.591803 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"metrics-certs\" (UniqueName: \"kubernetes.io/secret/3b640f09-f96d-4197-97be-f50e211e484d-metrics-certs\") pod \"network-metrics-daemon-jhkp7\" (UID: \"3b640f09-f96d-4197-97be-f50e211e484d\") " pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:09:49 crc 
kubenswrapper[4875]: I1126 00:09:49.868653 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-multus/network-metrics-daemon-jhkp7" Nov 26 00:09:50 crc kubenswrapper[4875]: I1126 00:09:50.225871 4875 patch_prober.go:28] interesting pod/router-default-5444994796-bplfw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 00:09:50 crc kubenswrapper[4875]: [-]has-synced failed: reason withheld Nov 26 00:09:50 crc kubenswrapper[4875]: [+]process-running ok Nov 26 00:09:50 crc kubenswrapper[4875]: healthz check failed Nov 26 00:09:50 crc kubenswrapper[4875]: I1126 00:09:50.225960 4875 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bplfw" podUID="b226d68e-dbae-401a-88d4-ccc0cb1a51cf" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 00:09:51 crc kubenswrapper[4875]: I1126 00:09:51.224687 4875 patch_prober.go:28] interesting pod/router-default-5444994796-bplfw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 00:09:51 crc kubenswrapper[4875]: [-]has-synced failed: reason withheld Nov 26 00:09:51 crc kubenswrapper[4875]: [+]process-running ok Nov 26 00:09:51 crc kubenswrapper[4875]: healthz check failed Nov 26 00:09:51 crc kubenswrapper[4875]: I1126 00:09:51.224882 4875 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bplfw" podUID="b226d68e-dbae-401a-88d4-ccc0cb1a51cf" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 00:09:52 crc kubenswrapper[4875]: I1126 00:09:52.225113 4875 patch_prober.go:28] interesting pod/router-default-5444994796-bplfw container/router namespace/openshift-ingress: Startup probe status=failure output="HTTP probe failed with statuscode: 500" start-of-body=[-]backend-http failed: reason withheld Nov 26 00:09:52 crc kubenswrapper[4875]: [+]has-synced ok Nov 26 00:09:52 crc kubenswrapper[4875]: [+]process-running ok Nov 26 00:09:52 crc kubenswrapper[4875]: healthz check failed Nov 26 00:09:52 crc kubenswrapper[4875]: I1126 00:09:52.225181 4875 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-ingress/router-default-5444994796-bplfw" podUID="b226d68e-dbae-401a-88d4-ccc0cb1a51cf" containerName="router" probeResult="failure" output="HTTP probe failed with statuscode: 500" Nov 26 00:09:53 crc kubenswrapper[4875]: I1126 00:09:53.225591 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-ingress/router-default-5444994796-bplfw" Nov 26 00:09:53 crc kubenswrapper[4875]: I1126 00:09:53.227763 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ingress/router-default-5444994796-bplfw" Nov 26 00:09:59 crc kubenswrapper[4875]: I1126 00:09:59.319154 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-console/console-f9d7485db-l4gfb" Nov 26 00:09:59 crc kubenswrapper[4875]: I1126 00:09:59.324549 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/console-f9d7485db-l4gfb" Nov 26 00:09:59 crc kubenswrapper[4875]: I1126 00:09:59.517231 4875 patch_prober.go:28] interesting pod/downloads-7954f5f757-7s69q container/download-server 
namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Nov 26 00:09:59 crc kubenswrapper[4875]: I1126 00:09:59.517302 4875 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7s69q" podUID="b118a730-fb77-4632-83f3-9b94b4d52b2c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Nov 26 00:09:59 crc kubenswrapper[4875]: I1126 00:09:59.518392 4875 patch_prober.go:28] interesting pod/downloads-7954f5f757-7s69q container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Nov 26 00:09:59 crc kubenswrapper[4875]: I1126 00:09:59.518745 4875 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-7s69q" podUID="b118a730-fb77-4632-83f3-9b94b4d52b2c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Nov 26 00:09:59 crc kubenswrapper[4875]: I1126 00:09:59.518852 4875 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-console/downloads-7954f5f757-7s69q" Nov 26 00:09:59 crc kubenswrapper[4875]: I1126 00:09:59.520263 4875 patch_prober.go:28] interesting pod/downloads-7954f5f757-7s69q container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Nov 26 00:09:59 crc kubenswrapper[4875]: I1126 00:09:59.520320 4875 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7s69q" podUID="b118a730-fb77-4632-83f3-9b94b4d52b2c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Nov 26 00:09:59 crc kubenswrapper[4875]: I1126 00:09:59.520807 4875 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="download-server" containerStatusID={"Type":"cri-o","ID":"380a17ba4f04785116c8889678205926dc189280d0ba8398772598d38d524232"} pod="openshift-console/downloads-7954f5f757-7s69q" containerMessage="Container download-server failed liveness probe, will be restarted" Nov 26 00:09:59 crc kubenswrapper[4875]: I1126 00:09:59.520908 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-console/downloads-7954f5f757-7s69q" podUID="b118a730-fb77-4632-83f3-9b94b4d52b2c" containerName="download-server" containerID="cri-o://380a17ba4f04785116c8889678205926dc189280d0ba8398772598d38d524232" gracePeriod=2 Nov 26 00:10:00 crc kubenswrapper[4875]: I1126 00:10:00.314024 4875 generic.go:334] "Generic (PLEG): container finished" podID="b118a730-fb77-4632-83f3-9b94b4d52b2c" containerID="380a17ba4f04785116c8889678205926dc189280d0ba8398772598d38d524232" exitCode=0 Nov 26 00:10:00 crc kubenswrapper[4875]: I1126 00:10:00.314152 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-7s69q" event={"ID":"b118a730-fb77-4632-83f3-9b94b4d52b2c","Type":"ContainerDied","Data":"380a17ba4f04785116c8889678205926dc189280d0ba8398772598d38d524232"} Nov 26 00:10:01 crc kubenswrapper[4875]: I1126 00:10:01.041864 4875 
kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:10:07 crc kubenswrapper[4875]: I1126 00:10:07.511948 4875 patch_prober.go:28] interesting pod/machine-config-daemon-9mztb container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 00:10:07 crc kubenswrapper[4875]: I1126 00:10:07.512494 4875 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 00:10:07 crc kubenswrapper[4875]: E1126 00:10:07.597251 4875 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 26 00:10:07 crc kubenswrapper[4875]: E1126 00:10:07.597979 4875 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-r9ld4,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-sgwj7_openshift-marketplace(b9a62657-d64a-4306-ba77-17417763f811): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 26 00:10:07 crc kubenswrapper[4875]: E1126 00:10:07.599196 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-sgwj7" podUID="b9a62657-d64a-4306-ba77-17417763f811" Nov 26 00:10:08 crc kubenswrapper[4875]: I1126 
00:10:08.349179 4875 generic.go:334] "Generic (PLEG): container finished" podID="55a68700-c0f7-4ae5-a252-013c8fbab545" containerID="f4fc1d6dc90bfe843e93fdbaf3ff36106df6a4b963d9bedb8eef7c7436d2109b" exitCode=0 Nov 26 00:10:08 crc kubenswrapper[4875]: I1126 00:10:08.349271 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-pruner-29401920-nz8v2" event={"ID":"55a68700-c0f7-4ae5-a252-013c8fbab545","Type":"ContainerDied","Data":"f4fc1d6dc90bfe843e93fdbaf3ff36106df6a4b963d9bedb8eef7c7436d2109b"} Nov 26 00:10:09 crc kubenswrapper[4875]: I1126 00:10:09.436237 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operator-lifecycle-manager/package-server-manager-789f6589d5-fgpzs" Nov 26 00:10:09 crc kubenswrapper[4875]: I1126 00:10:09.517178 4875 patch_prober.go:28] interesting pod/downloads-7954f5f757-7s69q container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Nov 26 00:10:09 crc kubenswrapper[4875]: I1126 00:10:09.517579 4875 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7s69q" podUID="b118a730-fb77-4632-83f3-9b94b4d52b2c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Nov 26 00:10:09 crc kubenswrapper[4875]: E1126 00:10:09.886465 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/certified-operator-index:v4.18\\\"\"" pod="openshift-marketplace/certified-operators-sgwj7" podUID="b9a62657-d64a-4306-ba77-17417763f811" Nov 26 00:10:15 crc kubenswrapper[4875]: I1126 00:10:15.485172 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-network-diagnostics/network-check-target-xd92c" Nov 26 00:10:19 crc kubenswrapper[4875]: I1126 00:10:19.515807 4875 patch_prober.go:28] interesting pod/downloads-7954f5f757-7s69q container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Nov 26 00:10:19 crc kubenswrapper[4875]: I1126 00:10:19.515870 4875 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7s69q" podUID="b118a730-fb77-4632-83f3-9b94b4d52b2c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Nov 26 00:10:20 crc kubenswrapper[4875]: E1126 00:10:20.252461 4875 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 26 00:10:20 crc kubenswrapper[4875]: E1126 00:10:20.252878 4875 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-nvsjg,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-bbw4f_openshift-marketplace(3db2a450-bce1-422c-bbf4-8676b8662c2e): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 26 00:10:20 crc kubenswrapper[4875]: E1126 00:10:20.255032 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-bbw4f" podUID="3db2a450-bce1-422c-bbf4-8676b8662c2e" Nov 26 00:10:25 crc kubenswrapper[4875]: E1126 00:10:25.700656 4875 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 26 00:10:25 crc kubenswrapper[4875]: E1126 00:10:25.701044 4875 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-hnlxn,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-jltf2_openshift-marketplace(a2335e94-5d68-4b3e-a13b-37296965798d): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 26 00:10:25 crc kubenswrapper[4875]: E1126 00:10:25.702214 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-jltf2" podUID="a2335e94-5d68-4b3e-a13b-37296965798d" Nov 26 00:10:29 crc kubenswrapper[4875]: I1126 00:10:29.516111 4875 patch_prober.go:28] interesting pod/downloads-7954f5f757-7s69q container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Nov 26 00:10:29 crc kubenswrapper[4875]: I1126 00:10:29.516844 4875 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7s69q" podUID="b118a730-fb77-4632-83f3-9b94b4d52b2c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Nov 26 00:10:30 crc kubenswrapper[4875]: E1126 00:10:30.463491 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-jltf2" podUID="a2335e94-5d68-4b3e-a13b-37296965798d" Nov 26 00:10:30 crc kubenswrapper[4875]: E1126 00:10:30.490564 4875 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-operator-index:v4.18" Nov 26 00:10:30 crc kubenswrapper[4875]: E1126 00:10:30.490975 4875 kuberuntime_manager.go:1274] "Unhandled Error" err="init container 
&Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-b5pwr,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-operators-wdpnp_openshift-marketplace(db812bf0-1b40-4520-8c30-0267012a87dc): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 26 00:10:30 crc kubenswrapper[4875]: E1126 00:10:30.492402 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-operators-wdpnp" podUID="db812bf0-1b40-4520-8c30-0267012a87dc" Nov 26 00:10:30 crc kubenswrapper[4875]: E1126 00:10:30.507386 4875 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 26 00:10:30 crc kubenswrapper[4875]: E1126 00:10:30.507549 4875 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-nkk46,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-rgtxh_openshift-marketplace(50ff6e6d-830c-40e0-9674-c3795df3eead): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 26 00:10:30 crc kubenswrapper[4875]: E1126 00:10:30.509662 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-rgtxh" podUID="50ff6e6d-830c-40e0-9674-c3795df3eead" Nov 26 00:10:30 crc kubenswrapper[4875]: I1126 00:10:30.546356 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-pruner-29401920-nz8v2" Nov 26 00:10:30 crc kubenswrapper[4875]: I1126 00:10:30.674152 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-multus/network-metrics-daemon-jhkp7"] Nov 26 00:10:30 crc kubenswrapper[4875]: I1126 00:10:30.729021 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/55a68700-c0f7-4ae5-a252-013c8fbab545-serviceca\") pod \"55a68700-c0f7-4ae5-a252-013c8fbab545\" (UID: \"55a68700-c0f7-4ae5-a252-013c8fbab545\") " Nov 26 00:10:30 crc kubenswrapper[4875]: I1126 00:10:30.730024 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/55a68700-c0f7-4ae5-a252-013c8fbab545-serviceca" (OuterVolumeSpecName: "serviceca") pod "55a68700-c0f7-4ae5-a252-013c8fbab545" (UID: "55a68700-c0f7-4ae5-a252-013c8fbab545"). InnerVolumeSpecName "serviceca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:10:30 crc kubenswrapper[4875]: I1126 00:10:30.730041 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xhlvk\" (UniqueName: \"kubernetes.io/projected/55a68700-c0f7-4ae5-a252-013c8fbab545-kube-api-access-xhlvk\") pod \"55a68700-c0f7-4ae5-a252-013c8fbab545\" (UID: \"55a68700-c0f7-4ae5-a252-013c8fbab545\") " Nov 26 00:10:30 crc kubenswrapper[4875]: I1126 00:10:30.730700 4875 reconciler_common.go:293] "Volume detached for volume \"serviceca\" (UniqueName: \"kubernetes.io/configmap/55a68700-c0f7-4ae5-a252-013c8fbab545-serviceca\") on node \"crc\" DevicePath \"\"" Nov 26 00:10:30 crc kubenswrapper[4875]: I1126 00:10:30.737973 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/55a68700-c0f7-4ae5-a252-013c8fbab545-kube-api-access-xhlvk" (OuterVolumeSpecName: "kube-api-access-xhlvk") pod "55a68700-c0f7-4ae5-a252-013c8fbab545" (UID: "55a68700-c0f7-4ae5-a252-013c8fbab545"). InnerVolumeSpecName "kube-api-access-xhlvk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:10:30 crc kubenswrapper[4875]: I1126 00:10:30.832991 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xhlvk\" (UniqueName: \"kubernetes.io/projected/55a68700-c0f7-4ae5-a252-013c8fbab545-kube-api-access-xhlvk\") on node \"crc\" DevicePath \"\"" Nov 26 00:10:31 crc kubenswrapper[4875]: I1126 00:10:31.472207 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-pruner-29401920-nz8v2" Nov 26 00:10:31 crc kubenswrapper[4875]: I1126 00:10:31.472220 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-pruner-29401920-nz8v2" event={"ID":"55a68700-c0f7-4ae5-a252-013c8fbab545","Type":"ContainerDied","Data":"f1c3f616f121bbba82e5e4fd3c5324139ca7decef01a101bc01a7282016ba4af"} Nov 26 00:10:31 crc kubenswrapper[4875]: I1126 00:10:31.472713 4875 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="f1c3f616f121bbba82e5e4fd3c5324139ca7decef01a101bc01a7282016ba4af" Nov 26 00:10:31 crc kubenswrapper[4875]: I1126 00:10:31.473869 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-jhkp7" event={"ID":"3b640f09-f96d-4197-97be-f50e211e484d","Type":"ContainerStarted","Data":"5c4474acf5b1945b66cd05439930d0dadd24dbe8131478534a4c6793e2931240"} Nov 26 00:10:31 crc kubenswrapper[4875]: E1126 00:10:31.474675 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-operator-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-operators-wdpnp" podUID="db812bf0-1b40-4520-8c30-0267012a87dc" Nov 26 00:10:31 crc kubenswrapper[4875]: E1126 00:10:31.474933 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-rgtxh" podUID="50ff6e6d-830c-40e0-9674-c3795df3eead" Nov 26 00:10:32 crc kubenswrapper[4875]: I1126 00:10:32.480248 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-console/downloads-7954f5f757-7s69q" 
event={"ID":"b118a730-fb77-4632-83f3-9b94b4d52b2c","Type":"ContainerStarted","Data":"32bf57a803bc48eb0bddaf31074f31f515bf1f16a3e6ada81f97d6214ae2cb59"} Nov 26 00:10:34 crc kubenswrapper[4875]: E1126 00:10:34.347355 4875 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/redhat-marketplace-index:v4.18" Nov 26 00:10:34 crc kubenswrapper[4875]: E1126 00:10:34.348881 4875 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/redhat-marketplace-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-dhv88,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod redhat-marketplace-425kq_openshift-marketplace(9e709507-2042-4993-a404-c5d8749b0da7): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 26 00:10:34 crc kubenswrapper[4875]: E1126 00:10:34.351277 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/redhat-marketplace-425kq" podUID="9e709507-2042-4993-a404-c5d8749b0da7" Nov 26 00:10:34 crc kubenswrapper[4875]: I1126 00:10:34.492620 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/network-metrics-daemon-jhkp7" event={"ID":"3b640f09-f96d-4197-97be-f50e211e484d","Type":"ContainerStarted","Data":"a70defc5d1f4ee7f7db77df552d486356d68e554895155280ec6030a1a2ec105"} Nov 26 00:10:34 crc kubenswrapper[4875]: I1126 00:10:34.493045 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-console/downloads-7954f5f757-7s69q" Nov 26 00:10:34 crc kubenswrapper[4875]: I1126 00:10:34.493314 4875 patch_prober.go:28] interesting pod/downloads-7954f5f757-7s69q container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 
10.217.0.14:8080: connect: connection refused" start-of-body= Nov 26 00:10:34 crc kubenswrapper[4875]: I1126 00:10:34.493359 4875 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7s69q" podUID="b118a730-fb77-4632-83f3-9b94b4d52b2c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Nov 26 00:10:34 crc kubenswrapper[4875]: E1126 00:10:34.495169 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/redhat-marketplace-index:v4.18\\\"\"" pod="openshift-marketplace/redhat-marketplace-425kq" podUID="9e709507-2042-4993-a404-c5d8749b0da7" Nov 26 00:10:34 crc kubenswrapper[4875]: E1126 00:10:34.776445 4875 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/community-operator-index:v4.18" Nov 26 00:10:34 crc kubenswrapper[4875]: E1126 00:10:34.776608 4875 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/community-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache --cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-mcrm2,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod community-operators-b4kmf_openshift-marketplace(830b0136-66d9-47cb-9a99-f858deae4a28): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 26 00:10:34 crc kubenswrapper[4875]: E1126 00:10:34.777780 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/community-operators-b4kmf" podUID="830b0136-66d9-47cb-9a99-f858deae4a28" Nov 26 00:10:35 crc kubenswrapper[4875]: I1126 00:10:35.506457 4875 kubelet.go:2453] "SyncLoop (PLEG): 
event for pod" pod="openshift-multus/network-metrics-daemon-jhkp7" event={"ID":"3b640f09-f96d-4197-97be-f50e211e484d","Type":"ContainerStarted","Data":"85e5652120117a5a57e829170aad7c5db212ba7eb23c6c6485149e9a7896edba"} Nov 26 00:10:35 crc kubenswrapper[4875]: I1126 00:10:35.507337 4875 patch_prober.go:28] interesting pod/downloads-7954f5f757-7s69q container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Nov 26 00:10:35 crc kubenswrapper[4875]: I1126 00:10:35.507360 4875 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7s69q" podUID="b118a730-fb77-4632-83f3-9b94b4d52b2c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Nov 26 00:10:35 crc kubenswrapper[4875]: E1126 00:10:35.507579 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/redhat/community-operator-index:v4.18\\\"\"" pod="openshift-marketplace/community-operators-b4kmf" podUID="830b0136-66d9-47cb-9a99-f858deae4a28" Nov 26 00:10:35 crc kubenswrapper[4875]: I1126 00:10:35.536832 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-multus/network-metrics-daemon-jhkp7" podStartSLOduration=187.536813277 podStartE2EDuration="3m7.536813277s" podCreationTimestamp="2025-11-26 00:07:28 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:10:35.534822426 +0000 UTC m=+208.724798141" watchObservedRunningTime="2025-11-26 00:10:35.536813277 +0000 UTC m=+208.726788972" Nov 26 00:10:35 crc kubenswrapper[4875]: E1126 00:10:35.553137 4875 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/redhat/certified-operator-index:v4.18" Nov 26 00:10:35 crc kubenswrapper[4875]: E1126 00:10:35.553301 4875 kuberuntime_manager.go:1274] "Unhandled Error" err="init container &Container{Name:extract-content,Image:registry.redhat.io/redhat/certified-operator-index:v4.18,Command:[/utilities/copy-content],Args:[--catalog.from=/configs --catalog.to=/extracted-catalog/catalog --cache.from=/tmp/cache 
--cache.to=/extracted-catalog/cache],WorkingDir:,Ports:[]ContainerPort{},Env:[]EnvVar{},Resources:ResourceRequirements{Limits:ResourceList{},Requests:ResourceList{},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:utilities,ReadOnly:false,MountPath:/utilities,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:catalog-content,ReadOnly:false,MountPath:/extracted-catalog,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},VolumeMount{Name:kube-api-access-rqbvh,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:Always,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:*1000170000,RunAsNonRoot:*true,ReadOnlyRootFilesystem:nil,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod certified-operators-wx746_openshift-marketplace(8bd0220e-3135-4767-b60b-e425535026fc): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 26 00:10:35 crc kubenswrapper[4875]: E1126 00:10:35.554496 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"extract-content\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-marketplace/certified-operators-wx746" podUID="8bd0220e-3135-4767-b60b-e425535026fc" Nov 26 00:10:37 crc kubenswrapper[4875]: I1126 00:10:37.511787 4875 patch_prober.go:28] interesting pod/machine-config-daemon-9mztb container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 00:10:37 crc kubenswrapper[4875]: I1126 00:10:37.512004 4875 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 00:10:37 crc kubenswrapper[4875]: I1126 00:10:37.512056 4875 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" Nov 26 00:10:37 crc kubenswrapper[4875]: I1126 00:10:37.512598 4875 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"2cdf274332d9e071cabf1993c37531ebc66300c965dd736a4795252168efc371"} pod="openshift-machine-config-operator/machine-config-daemon-9mztb" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 00:10:37 crc kubenswrapper[4875]: I1126 00:10:37.512647 4875 kuberuntime_container.go:808] "Killing container with a grace period" 
pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" containerName="machine-config-daemon" containerID="cri-o://2cdf274332d9e071cabf1993c37531ebc66300c965dd736a4795252168efc371" gracePeriod=600 Nov 26 00:10:38 crc kubenswrapper[4875]: I1126 00:10:38.522373 4875 generic.go:334] "Generic (PLEG): container finished" podID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" containerID="2cdf274332d9e071cabf1993c37531ebc66300c965dd736a4795252168efc371" exitCode=0 Nov 26 00:10:38 crc kubenswrapper[4875]: I1126 00:10:38.522482 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" event={"ID":"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e","Type":"ContainerDied","Data":"2cdf274332d9e071cabf1993c37531ebc66300c965dd736a4795252168efc371"} Nov 26 00:10:39 crc kubenswrapper[4875]: I1126 00:10:39.515641 4875 patch_prober.go:28] interesting pod/downloads-7954f5f757-7s69q container/download-server namespace/openshift-console: Liveness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Nov 26 00:10:39 crc kubenswrapper[4875]: I1126 00:10:39.515647 4875 patch_prober.go:28] interesting pod/downloads-7954f5f757-7s69q container/download-server namespace/openshift-console: Readiness probe status=failure output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" start-of-body= Nov 26 00:10:39 crc kubenswrapper[4875]: I1126 00:10:39.516052 4875 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-console/downloads-7954f5f757-7s69q" podUID="b118a730-fb77-4632-83f3-9b94b4d52b2c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Nov 26 00:10:39 crc kubenswrapper[4875]: I1126 00:10:39.516078 4875 prober.go:107] "Probe failed" probeType="Readiness" pod="openshift-console/downloads-7954f5f757-7s69q" podUID="b118a730-fb77-4632-83f3-9b94b4d52b2c" containerName="download-server" probeResult="failure" output="Get \"http://10.217.0.14:8080/\": dial tcp 10.217.0.14:8080: connect: connection refused" Nov 26 00:10:49 crc kubenswrapper[4875]: I1126 00:10:49.536573 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-console/downloads-7954f5f757-7s69q" Nov 26 00:11:00 crc kubenswrapper[4875]: I1126 00:11:00.636865 4875 generic.go:334] "Generic (PLEG): container finished" podID="b9a62657-d64a-4306-ba77-17417763f811" containerID="dc456bf79b9eba53dad87604c7d1658afaf9e7cc2eb45073f6a2eedd4f75e001" exitCode=0 Nov 26 00:11:00 crc kubenswrapper[4875]: I1126 00:11:00.636946 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sgwj7" event={"ID":"b9a62657-d64a-4306-ba77-17417763f811","Type":"ContainerDied","Data":"dc456bf79b9eba53dad87604c7d1658afaf9e7cc2eb45073f6a2eedd4f75e001"} Nov 26 00:11:00 crc kubenswrapper[4875]: I1126 00:11:00.641186 4875 generic.go:334] "Generic (PLEG): container finished" podID="3db2a450-bce1-422c-bbf4-8676b8662c2e" containerID="c855f9295e9b1daa71fc776ebe2b0d0a21c4813fc747b4bb5a636d13c603aef7" exitCode=0 Nov 26 00:11:00 crc kubenswrapper[4875]: I1126 00:11:00.641330 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bbw4f" 
event={"ID":"3db2a450-bce1-422c-bbf4-8676b8662c2e","Type":"ContainerDied","Data":"c855f9295e9b1daa71fc776ebe2b0d0a21c4813fc747b4bb5a636d13c603aef7"} Nov 26 00:11:00 crc kubenswrapper[4875]: I1126 00:11:00.657840 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" event={"ID":"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e","Type":"ContainerStarted","Data":"1e834a56eabb3c55e61cc7d226e09bfcf80d0068e3d0752e0aeac7fbc0a28a44"} Nov 26 00:11:20 crc kubenswrapper[4875]: I1126 00:11:20.762039 4875 generic.go:334] "Generic (PLEG): container finished" podID="50ff6e6d-830c-40e0-9674-c3795df3eead" containerID="de1432f0ebf79167ad37606833d991e09691bcb81997d1d682456c7ae2b6c30a" exitCode=0 Nov 26 00:11:20 crc kubenswrapper[4875]: I1126 00:11:20.762130 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rgtxh" event={"ID":"50ff6e6d-830c-40e0-9674-c3795df3eead","Type":"ContainerDied","Data":"de1432f0ebf79167ad37606833d991e09691bcb81997d1d682456c7ae2b6c30a"} Nov 26 00:11:20 crc kubenswrapper[4875]: I1126 00:11:20.764794 4875 generic.go:334] "Generic (PLEG): container finished" podID="a2335e94-5d68-4b3e-a13b-37296965798d" containerID="f5ae930f5cb59c4f207238a32693ef2b71f4580141e002a4c9ead1c20dca013e" exitCode=0 Nov 26 00:11:20 crc kubenswrapper[4875]: I1126 00:11:20.764856 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jltf2" event={"ID":"a2335e94-5d68-4b3e-a13b-37296965798d","Type":"ContainerDied","Data":"f5ae930f5cb59c4f207238a32693ef2b71f4580141e002a4c9ead1c20dca013e"} Nov 26 00:11:20 crc kubenswrapper[4875]: I1126 00:11:20.773247 4875 generic.go:334] "Generic (PLEG): container finished" podID="8bd0220e-3135-4767-b60b-e425535026fc" containerID="a2f925fd829b8c6996ef9793da98b3ecedccc29b824806e26fcd91db164726ba" exitCode=0 Nov 26 00:11:20 crc kubenswrapper[4875]: I1126 00:11:20.773305 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wx746" event={"ID":"8bd0220e-3135-4767-b60b-e425535026fc","Type":"ContainerDied","Data":"a2f925fd829b8c6996ef9793da98b3ecedccc29b824806e26fcd91db164726ba"} Nov 26 00:11:20 crc kubenswrapper[4875]: I1126 00:11:20.776164 4875 generic.go:334] "Generic (PLEG): container finished" podID="830b0136-66d9-47cb-9a99-f858deae4a28" containerID="c8aed07714d6c8aefdb0e6a5abf839b0f5e3de75dca4dbb0884152034c9c0e17" exitCode=0 Nov 26 00:11:20 crc kubenswrapper[4875]: I1126 00:11:20.776243 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b4kmf" event={"ID":"830b0136-66d9-47cb-9a99-f858deae4a28","Type":"ContainerDied","Data":"c8aed07714d6c8aefdb0e6a5abf839b0f5e3de75dca4dbb0884152034c9c0e17"} Nov 26 00:11:20 crc kubenswrapper[4875]: I1126 00:11:20.779286 4875 generic.go:334] "Generic (PLEG): container finished" podID="db812bf0-1b40-4520-8c30-0267012a87dc" containerID="4fa7e855be4ca8b52a7d6966693bef3b48b8917af61e112eea14a49bd939d405" exitCode=0 Nov 26 00:11:20 crc kubenswrapper[4875]: I1126 00:11:20.779445 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wdpnp" event={"ID":"db812bf0-1b40-4520-8c30-0267012a87dc","Type":"ContainerDied","Data":"4fa7e855be4ca8b52a7d6966693bef3b48b8917af61e112eea14a49bd939d405"} Nov 26 00:11:20 crc kubenswrapper[4875]: I1126 00:11:20.787928 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sgwj7" 
event={"ID":"b9a62657-d64a-4306-ba77-17417763f811","Type":"ContainerStarted","Data":"d59a2c580620323609b46be216adb653f119352da9aec2af3a895279a9690934"} Nov 26 00:11:20 crc kubenswrapper[4875]: I1126 00:11:20.797871 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bbw4f" event={"ID":"3db2a450-bce1-422c-bbf4-8676b8662c2e","Type":"ContainerStarted","Data":"5df940b2d998cccc0b893824ae3830e68551a99623756524df27a471f10c9e32"} Nov 26 00:11:20 crc kubenswrapper[4875]: I1126 00:11:20.802246 4875 generic.go:334] "Generic (PLEG): container finished" podID="9e709507-2042-4993-a404-c5d8749b0da7" containerID="f900628c22e2bb685c41907d106c44fcf8cc2627f26b8ac8c3d300b6da063f6b" exitCode=0 Nov 26 00:11:20 crc kubenswrapper[4875]: I1126 00:11:20.802307 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-425kq" event={"ID":"9e709507-2042-4993-a404-c5d8749b0da7","Type":"ContainerDied","Data":"f900628c22e2bb685c41907d106c44fcf8cc2627f26b8ac8c3d300b6da063f6b"} Nov 26 00:11:20 crc kubenswrapper[4875]: I1126 00:11:20.924093 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-bbw4f" podStartSLOduration=3.466476369 podStartE2EDuration="1m42.924068573s" podCreationTimestamp="2025-11-26 00:09:38 +0000 UTC" firstStartedPulling="2025-11-26 00:09:40.088078313 +0000 UTC m=+153.278054008" lastFinishedPulling="2025-11-26 00:11:19.545670517 +0000 UTC m=+252.735646212" observedRunningTime="2025-11-26 00:11:20.915244945 +0000 UTC m=+254.105220650" watchObservedRunningTime="2025-11-26 00:11:20.924068573 +0000 UTC m=+254.114044278" Nov 26 00:11:20 crc kubenswrapper[4875]: I1126 00:11:20.954858 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-sgwj7" podStartSLOduration=5.771936458 podStartE2EDuration="1m44.954831224s" podCreationTimestamp="2025-11-26 00:09:36 +0000 UTC" firstStartedPulling="2025-11-26 00:09:40.076489675 +0000 UTC m=+153.266465390" lastFinishedPulling="2025-11-26 00:11:19.259384461 +0000 UTC m=+252.449360156" observedRunningTime="2025-11-26 00:11:20.951758454 +0000 UTC m=+254.141734179" watchObservedRunningTime="2025-11-26 00:11:20.954831224 +0000 UTC m=+254.144806919" Nov 26 00:11:21 crc kubenswrapper[4875]: I1126 00:11:21.809986 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b4kmf" event={"ID":"830b0136-66d9-47cb-9a99-f858deae4a28","Type":"ContainerStarted","Data":"7e4e0cbe2f33ce20dd719319fc7186faae0b0cf00b67753c0a4fb0c3bfd56de5"} Nov 26 00:11:22 crc kubenswrapper[4875]: I1126 00:11:22.819144 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rgtxh" event={"ID":"50ff6e6d-830c-40e0-9674-c3795df3eead","Type":"ContainerStarted","Data":"ce17bb87c3b5c1763171fda89918725c6c53911abdf43b4e5aff64923ddc9810"} Nov 26 00:11:22 crc kubenswrapper[4875]: I1126 00:11:22.822520 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-425kq" event={"ID":"9e709507-2042-4993-a404-c5d8749b0da7","Type":"ContainerStarted","Data":"5e57024b97d3b0000c1a8e26536d619a6569a9495c9dfe5216fe3a4fd600d49e"} Nov 26 00:11:22 crc kubenswrapper[4875]: I1126 00:11:22.839826 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-rgtxh" podStartSLOduration=3.331737391 podStartE2EDuration="1m43.839806513s" 
podCreationTimestamp="2025-11-26 00:09:39 +0000 UTC" firstStartedPulling="2025-11-26 00:09:41.107333128 +0000 UTC m=+154.297308823" lastFinishedPulling="2025-11-26 00:11:21.61540225 +0000 UTC m=+254.805377945" observedRunningTime="2025-11-26 00:11:22.837863274 +0000 UTC m=+256.027838969" watchObservedRunningTime="2025-11-26 00:11:22.839806513 +0000 UTC m=+256.029782208" Nov 26 00:11:22 crc kubenswrapper[4875]: I1126 00:11:22.840536 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-b4kmf" podStartSLOduration=4.567332544 podStartE2EDuration="1m45.840528272s" podCreationTimestamp="2025-11-26 00:09:37 +0000 UTC" firstStartedPulling="2025-11-26 00:09:40.082261144 +0000 UTC m=+153.272236839" lastFinishedPulling="2025-11-26 00:11:21.355456872 +0000 UTC m=+254.545432567" observedRunningTime="2025-11-26 00:11:21.830386271 +0000 UTC m=+255.020361976" watchObservedRunningTime="2025-11-26 00:11:22.840528272 +0000 UTC m=+256.030503967" Nov 26 00:11:22 crc kubenswrapper[4875]: I1126 00:11:22.858221 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-425kq" podStartSLOduration=3.446262679 podStartE2EDuration="1m43.858199897s" podCreationTimestamp="2025-11-26 00:09:39 +0000 UTC" firstStartedPulling="2025-11-26 00:09:41.107567114 +0000 UTC m=+154.297542809" lastFinishedPulling="2025-11-26 00:11:21.519504332 +0000 UTC m=+254.709480027" observedRunningTime="2025-11-26 00:11:22.855943918 +0000 UTC m=+256.045919613" watchObservedRunningTime="2025-11-26 00:11:22.858199897 +0000 UTC m=+256.048175592" Nov 26 00:11:23 crc kubenswrapper[4875]: I1126 00:11:23.830871 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wdpnp" event={"ID":"db812bf0-1b40-4520-8c30-0267012a87dc","Type":"ContainerStarted","Data":"102e1bb52c71fe60962bf4b93f826c71904cc1e9b4d58ad19043d5c4f462db97"} Nov 26 00:11:23 crc kubenswrapper[4875]: I1126 00:11:23.832990 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jltf2" event={"ID":"a2335e94-5d68-4b3e-a13b-37296965798d","Type":"ContainerStarted","Data":"7a1e8eb4d0a22760d011ec88c591c730f48cda76eb9e4c090d0c1559dead344a"} Nov 26 00:11:23 crc kubenswrapper[4875]: I1126 00:11:23.854144 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-wdpnp" podStartSLOduration=3.437620117 podStartE2EDuration="1m43.854127742s" podCreationTimestamp="2025-11-26 00:09:40 +0000 UTC" firstStartedPulling="2025-11-26 00:09:42.137567066 +0000 UTC m=+155.327542761" lastFinishedPulling="2025-11-26 00:11:22.554074691 +0000 UTC m=+255.744050386" observedRunningTime="2025-11-26 00:11:23.849864062 +0000 UTC m=+257.039839757" watchObservedRunningTime="2025-11-26 00:11:23.854127742 +0000 UTC m=+257.044103427" Nov 26 00:11:23 crc kubenswrapper[4875]: I1126 00:11:23.875461 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-jltf2" podStartSLOduration=2.629635437 podStartE2EDuration="1m43.875435989s" podCreationTimestamp="2025-11-26 00:09:40 +0000 UTC" firstStartedPulling="2025-11-26 00:09:42.160026144 +0000 UTC m=+155.350001839" lastFinishedPulling="2025-11-26 00:11:23.405826696 +0000 UTC m=+256.595802391" observedRunningTime="2025-11-26 00:11:23.874099136 +0000 UTC m=+257.064074831" watchObservedRunningTime="2025-11-26 00:11:23.875435989 +0000 UTC m=+257.065411734" Nov 26 
00:11:24 crc kubenswrapper[4875]: I1126 00:11:24.840504 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wx746" event={"ID":"8bd0220e-3135-4767-b60b-e425535026fc","Type":"ContainerStarted","Data":"b98fda544a8ec27da75c17a9a9f70e83df5f74f101c5cf5874ee67909f3d50bf"} Nov 26 00:11:24 crc kubenswrapper[4875]: I1126 00:11:24.858952 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-wx746" podStartSLOduration=4.084562042 podStartE2EDuration="1m47.858929874s" podCreationTimestamp="2025-11-26 00:09:37 +0000 UTC" firstStartedPulling="2025-11-26 00:09:40.066395486 +0000 UTC m=+153.256371181" lastFinishedPulling="2025-11-26 00:11:23.840763318 +0000 UTC m=+257.030739013" observedRunningTime="2025-11-26 00:11:24.857137159 +0000 UTC m=+258.047112844" watchObservedRunningTime="2025-11-26 00:11:24.858929874 +0000 UTC m=+258.048905569" Nov 26 00:11:27 crc kubenswrapper[4875]: I1126 00:11:27.120487 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-sgwj7" Nov 26 00:11:27 crc kubenswrapper[4875]: I1126 00:11:27.120812 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-sgwj7" Nov 26 00:11:27 crc kubenswrapper[4875]: I1126 00:11:27.431120 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-wx746" Nov 26 00:11:27 crc kubenswrapper[4875]: I1126 00:11:27.431176 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-wx746" Nov 26 00:11:28 crc kubenswrapper[4875]: I1126 00:11:28.154285 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-b4kmf" Nov 26 00:11:28 crc kubenswrapper[4875]: I1126 00:11:28.154881 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-b4kmf" Nov 26 00:11:28 crc kubenswrapper[4875]: I1126 00:11:28.408055 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-b4kmf" Nov 26 00:11:28 crc kubenswrapper[4875]: I1126 00:11:28.408960 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-wx746" Nov 26 00:11:28 crc kubenswrapper[4875]: I1126 00:11:28.412884 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-sgwj7" Nov 26 00:11:28 crc kubenswrapper[4875]: I1126 00:11:28.440204 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-bbw4f" Nov 26 00:11:28 crc kubenswrapper[4875]: I1126 00:11:28.440249 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-bbw4f" Nov 26 00:11:28 crc kubenswrapper[4875]: I1126 00:11:28.452106 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-sgwj7" Nov 26 00:11:28 crc kubenswrapper[4875]: I1126 00:11:28.490936 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-bbw4f" Nov 26 00:11:28 crc kubenswrapper[4875]: I1126 00:11:28.901083 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" 
status="ready" pod="openshift-marketplace/community-operators-bbw4f" Nov 26 00:11:28 crc kubenswrapper[4875]: I1126 00:11:28.908299 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-b4kmf" Nov 26 00:11:29 crc kubenswrapper[4875]: I1126 00:11:29.786191 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-425kq" Nov 26 00:11:29 crc kubenswrapper[4875]: I1126 00:11:29.786568 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-425kq" Nov 26 00:11:29 crc kubenswrapper[4875]: I1126 00:11:29.827130 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-425kq" Nov 26 00:11:29 crc kubenswrapper[4875]: I1126 00:11:29.922127 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-425kq" Nov 26 00:11:30 crc kubenswrapper[4875]: I1126 00:11:30.188137 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-rgtxh" Nov 26 00:11:30 crc kubenswrapper[4875]: I1126 00:11:30.188179 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-rgtxh" Nov 26 00:11:30 crc kubenswrapper[4875]: I1126 00:11:30.224898 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-rgtxh" Nov 26 00:11:30 crc kubenswrapper[4875]: I1126 00:11:30.806618 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-jltf2" Nov 26 00:11:30 crc kubenswrapper[4875]: I1126 00:11:30.807623 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-jltf2" Nov 26 00:11:30 crc kubenswrapper[4875]: I1126 00:11:30.846110 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-jltf2" Nov 26 00:11:30 crc kubenswrapper[4875]: I1126 00:11:30.931325 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-rgtxh" Nov 26 00:11:30 crc kubenswrapper[4875]: I1126 00:11:30.931405 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-jltf2" Nov 26 00:11:30 crc kubenswrapper[4875]: I1126 00:11:30.996480 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bbw4f"] Nov 26 00:11:30 crc kubenswrapper[4875]: I1126 00:11:30.996821 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-bbw4f" podUID="3db2a450-bce1-422c-bbf4-8676b8662c2e" containerName="registry-server" containerID="cri-o://5df940b2d998cccc0b893824ae3830e68551a99623756524df27a471f10c9e32" gracePeriod=2 Nov 26 00:11:31 crc kubenswrapper[4875]: I1126 00:11:31.207666 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-wdpnp" Nov 26 00:11:31 crc kubenswrapper[4875]: I1126 00:11:31.207971 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-wdpnp" Nov 26 00:11:31 crc kubenswrapper[4875]: I1126 00:11:31.245556 4875 kubelet.go:2542] "SyncLoop (probe)" 
probe="startup" status="started" pod="openshift-marketplace/redhat-operators-wdpnp" Nov 26 00:11:31 crc kubenswrapper[4875]: I1126 00:11:31.901402 4875 generic.go:334] "Generic (PLEG): container finished" podID="3db2a450-bce1-422c-bbf4-8676b8662c2e" containerID="5df940b2d998cccc0b893824ae3830e68551a99623756524df27a471f10c9e32" exitCode=0 Nov 26 00:11:31 crc kubenswrapper[4875]: I1126 00:11:31.901492 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bbw4f" event={"ID":"3db2a450-bce1-422c-bbf4-8676b8662c2e","Type":"ContainerDied","Data":"5df940b2d998cccc0b893824ae3830e68551a99623756524df27a471f10c9e32"} Nov 26 00:11:31 crc kubenswrapper[4875]: I1126 00:11:31.938322 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-wdpnp" Nov 26 00:11:32 crc kubenswrapper[4875]: I1126 00:11:32.340138 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bbw4f" Nov 26 00:11:32 crc kubenswrapper[4875]: I1126 00:11:32.529063 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3db2a450-bce1-422c-bbf4-8676b8662c2e-utilities\") pod \"3db2a450-bce1-422c-bbf4-8676b8662c2e\" (UID: \"3db2a450-bce1-422c-bbf4-8676b8662c2e\") " Nov 26 00:11:32 crc kubenswrapper[4875]: I1126 00:11:32.529350 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3db2a450-bce1-422c-bbf4-8676b8662c2e-catalog-content\") pod \"3db2a450-bce1-422c-bbf4-8676b8662c2e\" (UID: \"3db2a450-bce1-422c-bbf4-8676b8662c2e\") " Nov 26 00:11:32 crc kubenswrapper[4875]: I1126 00:11:32.529403 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nvsjg\" (UniqueName: \"kubernetes.io/projected/3db2a450-bce1-422c-bbf4-8676b8662c2e-kube-api-access-nvsjg\") pod \"3db2a450-bce1-422c-bbf4-8676b8662c2e\" (UID: \"3db2a450-bce1-422c-bbf4-8676b8662c2e\") " Nov 26 00:11:32 crc kubenswrapper[4875]: I1126 00:11:32.529812 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3db2a450-bce1-422c-bbf4-8676b8662c2e-utilities" (OuterVolumeSpecName: "utilities") pod "3db2a450-bce1-422c-bbf4-8676b8662c2e" (UID: "3db2a450-bce1-422c-bbf4-8676b8662c2e"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:11:32 crc kubenswrapper[4875]: I1126 00:11:32.538813 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3db2a450-bce1-422c-bbf4-8676b8662c2e-kube-api-access-nvsjg" (OuterVolumeSpecName: "kube-api-access-nvsjg") pod "3db2a450-bce1-422c-bbf4-8676b8662c2e" (UID: "3db2a450-bce1-422c-bbf4-8676b8662c2e"). InnerVolumeSpecName "kube-api-access-nvsjg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:11:32 crc kubenswrapper[4875]: I1126 00:11:32.582143 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3db2a450-bce1-422c-bbf4-8676b8662c2e-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3db2a450-bce1-422c-bbf4-8676b8662c2e" (UID: "3db2a450-bce1-422c-bbf4-8676b8662c2e"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:11:32 crc kubenswrapper[4875]: I1126 00:11:32.630442 4875 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3db2a450-bce1-422c-bbf4-8676b8662c2e-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 00:11:32 crc kubenswrapper[4875]: I1126 00:11:32.630475 4875 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3db2a450-bce1-422c-bbf4-8676b8662c2e-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 00:11:32 crc kubenswrapper[4875]: I1126 00:11:32.630488 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nvsjg\" (UniqueName: \"kubernetes.io/projected/3db2a450-bce1-422c-bbf4-8676b8662c2e-kube-api-access-nvsjg\") on node \"crc\" DevicePath \"\"" Nov 26 00:11:32 crc kubenswrapper[4875]: I1126 00:11:32.909518 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bbw4f" event={"ID":"3db2a450-bce1-422c-bbf4-8676b8662c2e","Type":"ContainerDied","Data":"4da80b9f6a94d76454c6bf56a1a9ded6928fdf3e1e752eefcc6998199d344dc2"} Nov 26 00:11:32 crc kubenswrapper[4875]: I1126 00:11:32.909582 4875 scope.go:117] "RemoveContainer" containerID="5df940b2d998cccc0b893824ae3830e68551a99623756524df27a471f10c9e32" Nov 26 00:11:32 crc kubenswrapper[4875]: I1126 00:11:32.909596 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bbw4f" Nov 26 00:11:32 crc kubenswrapper[4875]: I1126 00:11:32.927175 4875 scope.go:117] "RemoveContainer" containerID="c855f9295e9b1daa71fc776ebe2b0d0a21c4813fc747b4bb5a636d13c603aef7" Nov 26 00:11:32 crc kubenswrapper[4875]: I1126 00:11:32.938616 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bbw4f"] Nov 26 00:11:32 crc kubenswrapper[4875]: I1126 00:11:32.945393 4875 scope.go:117] "RemoveContainer" containerID="d4c0ce3a4c619049bdc9a3249b258644da9a8e4412b6ab95bb149cc023d9a6b7" Nov 26 00:11:32 crc kubenswrapper[4875]: I1126 00:11:32.946127 4875 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-bbw4f"] Nov 26 00:11:33 crc kubenswrapper[4875]: I1126 00:11:33.193842 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rgtxh"] Nov 26 00:11:33 crc kubenswrapper[4875]: I1126 00:11:33.194081 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-rgtxh" podUID="50ff6e6d-830c-40e0-9674-c3795df3eead" containerName="registry-server" containerID="cri-o://ce17bb87c3b5c1763171fda89918725c6c53911abdf43b4e5aff64923ddc9810" gracePeriod=2 Nov 26 00:11:33 crc kubenswrapper[4875]: I1126 00:11:33.393681 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wdpnp"] Nov 26 00:11:33 crc kubenswrapper[4875]: I1126 00:11:33.537703 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3db2a450-bce1-422c-bbf4-8676b8662c2e" path="/var/lib/kubelet/pods/3db2a450-bce1-422c-bbf4-8676b8662c2e/volumes" Nov 26 00:11:33 crc kubenswrapper[4875]: I1126 00:11:33.918319 4875 generic.go:334] "Generic (PLEG): container finished" podID="50ff6e6d-830c-40e0-9674-c3795df3eead" containerID="ce17bb87c3b5c1763171fda89918725c6c53911abdf43b4e5aff64923ddc9810" exitCode=0 Nov 26 00:11:33 crc kubenswrapper[4875]: I1126 00:11:33.918398 4875 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rgtxh" event={"ID":"50ff6e6d-830c-40e0-9674-c3795df3eead","Type":"ContainerDied","Data":"ce17bb87c3b5c1763171fda89918725c6c53911abdf43b4e5aff64923ddc9810"} Nov 26 00:11:33 crc kubenswrapper[4875]: I1126 00:11:33.919781 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-wdpnp" podUID="db812bf0-1b40-4520-8c30-0267012a87dc" containerName="registry-server" containerID="cri-o://102e1bb52c71fe60962bf4b93f826c71904cc1e9b4d58ad19043d5c4f462db97" gracePeriod=2 Nov 26 00:11:34 crc kubenswrapper[4875]: I1126 00:11:34.925471 4875 generic.go:334] "Generic (PLEG): container finished" podID="db812bf0-1b40-4520-8c30-0267012a87dc" containerID="102e1bb52c71fe60962bf4b93f826c71904cc1e9b4d58ad19043d5c4f462db97" exitCode=0 Nov 26 00:11:34 crc kubenswrapper[4875]: I1126 00:11:34.925608 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wdpnp" event={"ID":"db812bf0-1b40-4520-8c30-0267012a87dc","Type":"ContainerDied","Data":"102e1bb52c71fe60962bf4b93f826c71904cc1e9b4d58ad19043d5c4f462db97"} Nov 26 00:11:34 crc kubenswrapper[4875]: I1126 00:11:34.927527 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-rgtxh" event={"ID":"50ff6e6d-830c-40e0-9674-c3795df3eead","Type":"ContainerDied","Data":"fe0cbbd3708657c77e92711b72b02a412e88ef3fde2d60b563ad57865e4ce539"} Nov 26 00:11:34 crc kubenswrapper[4875]: I1126 00:11:34.927560 4875 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="fe0cbbd3708657c77e92711b72b02a412e88ef3fde2d60b563ad57865e4ce539" Nov 26 00:11:34 crc kubenswrapper[4875]: I1126 00:11:34.939317 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rgtxh" Nov 26 00:11:35 crc kubenswrapper[4875]: I1126 00:11:35.066606 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-nkk46\" (UniqueName: \"kubernetes.io/projected/50ff6e6d-830c-40e0-9674-c3795df3eead-kube-api-access-nkk46\") pod \"50ff6e6d-830c-40e0-9674-c3795df3eead\" (UID: \"50ff6e6d-830c-40e0-9674-c3795df3eead\") " Nov 26 00:11:35 crc kubenswrapper[4875]: I1126 00:11:35.066680 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/50ff6e6d-830c-40e0-9674-c3795df3eead-catalog-content\") pod \"50ff6e6d-830c-40e0-9674-c3795df3eead\" (UID: \"50ff6e6d-830c-40e0-9674-c3795df3eead\") " Nov 26 00:11:35 crc kubenswrapper[4875]: I1126 00:11:35.066716 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/50ff6e6d-830c-40e0-9674-c3795df3eead-utilities\") pod \"50ff6e6d-830c-40e0-9674-c3795df3eead\" (UID: \"50ff6e6d-830c-40e0-9674-c3795df3eead\") " Nov 26 00:11:35 crc kubenswrapper[4875]: I1126 00:11:35.067664 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/50ff6e6d-830c-40e0-9674-c3795df3eead-utilities" (OuterVolumeSpecName: "utilities") pod "50ff6e6d-830c-40e0-9674-c3795df3eead" (UID: "50ff6e6d-830c-40e0-9674-c3795df3eead"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:11:35 crc kubenswrapper[4875]: I1126 00:11:35.075557 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/50ff6e6d-830c-40e0-9674-c3795df3eead-kube-api-access-nkk46" (OuterVolumeSpecName: "kube-api-access-nkk46") pod "50ff6e6d-830c-40e0-9674-c3795df3eead" (UID: "50ff6e6d-830c-40e0-9674-c3795df3eead"). InnerVolumeSpecName "kube-api-access-nkk46". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:11:35 crc kubenswrapper[4875]: I1126 00:11:35.083638 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/50ff6e6d-830c-40e0-9674-c3795df3eead-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "50ff6e6d-830c-40e0-9674-c3795df3eead" (UID: "50ff6e6d-830c-40e0-9674-c3795df3eead"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:11:35 crc kubenswrapper[4875]: I1126 00:11:35.168343 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-nkk46\" (UniqueName: \"kubernetes.io/projected/50ff6e6d-830c-40e0-9674-c3795df3eead-kube-api-access-nkk46\") on node \"crc\" DevicePath \"\"" Nov 26 00:11:35 crc kubenswrapper[4875]: I1126 00:11:35.168384 4875 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/50ff6e6d-830c-40e0-9674-c3795df3eead-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 00:11:35 crc kubenswrapper[4875]: I1126 00:11:35.168396 4875 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/50ff6e6d-830c-40e0-9674-c3795df3eead-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 00:11:35 crc kubenswrapper[4875]: I1126 00:11:35.932146 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-rgtxh" Nov 26 00:11:35 crc kubenswrapper[4875]: I1126 00:11:35.953361 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-rgtxh"] Nov 26 00:11:35 crc kubenswrapper[4875]: I1126 00:11:35.959329 4875 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-rgtxh"] Nov 26 00:11:36 crc kubenswrapper[4875]: I1126 00:11:36.491699 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wdpnp" Nov 26 00:11:36 crc kubenswrapper[4875]: I1126 00:11:36.686608 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/db812bf0-1b40-4520-8c30-0267012a87dc-catalog-content\") pod \"db812bf0-1b40-4520-8c30-0267012a87dc\" (UID: \"db812bf0-1b40-4520-8c30-0267012a87dc\") " Nov 26 00:11:36 crc kubenswrapper[4875]: I1126 00:11:36.686666 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/db812bf0-1b40-4520-8c30-0267012a87dc-utilities\") pod \"db812bf0-1b40-4520-8c30-0267012a87dc\" (UID: \"db812bf0-1b40-4520-8c30-0267012a87dc\") " Nov 26 00:11:36 crc kubenswrapper[4875]: I1126 00:11:36.686810 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b5pwr\" (UniqueName: \"kubernetes.io/projected/db812bf0-1b40-4520-8c30-0267012a87dc-kube-api-access-b5pwr\") pod \"db812bf0-1b40-4520-8c30-0267012a87dc\" (UID: \"db812bf0-1b40-4520-8c30-0267012a87dc\") " Nov 26 00:11:36 crc kubenswrapper[4875]: I1126 00:11:36.687429 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/db812bf0-1b40-4520-8c30-0267012a87dc-utilities" (OuterVolumeSpecName: "utilities") pod "db812bf0-1b40-4520-8c30-0267012a87dc" (UID: "db812bf0-1b40-4520-8c30-0267012a87dc"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:11:36 crc kubenswrapper[4875]: I1126 00:11:36.705563 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/db812bf0-1b40-4520-8c30-0267012a87dc-kube-api-access-b5pwr" (OuterVolumeSpecName: "kube-api-access-b5pwr") pod "db812bf0-1b40-4520-8c30-0267012a87dc" (UID: "db812bf0-1b40-4520-8c30-0267012a87dc"). InnerVolumeSpecName "kube-api-access-b5pwr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:11:36 crc kubenswrapper[4875]: I1126 00:11:36.788516 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b5pwr\" (UniqueName: \"kubernetes.io/projected/db812bf0-1b40-4520-8c30-0267012a87dc-kube-api-access-b5pwr\") on node \"crc\" DevicePath \"\"" Nov 26 00:11:36 crc kubenswrapper[4875]: I1126 00:11:36.788551 4875 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/db812bf0-1b40-4520-8c30-0267012a87dc-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 00:11:36 crc kubenswrapper[4875]: I1126 00:11:36.938061 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-wdpnp" event={"ID":"db812bf0-1b40-4520-8c30-0267012a87dc","Type":"ContainerDied","Data":"f89418f126fb007923d5152cad7064bdad1be0757022745695dc20229c821a93"} Nov 26 00:11:36 crc kubenswrapper[4875]: I1126 00:11:36.938103 4875 scope.go:117] "RemoveContainer" containerID="102e1bb52c71fe60962bf4b93f826c71904cc1e9b4d58ad19043d5c4f462db97" Nov 26 00:11:36 crc kubenswrapper[4875]: I1126 00:11:36.938203 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-wdpnp" Nov 26 00:11:36 crc kubenswrapper[4875]: I1126 00:11:36.957031 4875 scope.go:117] "RemoveContainer" containerID="4fa7e855be4ca8b52a7d6966693bef3b48b8917af61e112eea14a49bd939d405" Nov 26 00:11:36 crc kubenswrapper[4875]: I1126 00:11:36.975104 4875 scope.go:117] "RemoveContainer" containerID="c6b80953796083a5546336ad6b0425003c101cef68297c13041376daeb7b048b" Nov 26 00:11:37 crc kubenswrapper[4875]: I1126 00:11:37.132491 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/db812bf0-1b40-4520-8c30-0267012a87dc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "db812bf0-1b40-4520-8c30-0267012a87dc" (UID: "db812bf0-1b40-4520-8c30-0267012a87dc"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:11:37 crc kubenswrapper[4875]: I1126 00:11:37.192837 4875 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/db812bf0-1b40-4520-8c30-0267012a87dc-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 00:11:37 crc kubenswrapper[4875]: I1126 00:11:37.266137 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-wdpnp"] Nov 26 00:11:37 crc kubenswrapper[4875]: I1126 00:11:37.269480 4875 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-wdpnp"] Nov 26 00:11:37 crc kubenswrapper[4875]: I1126 00:11:37.470014 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-wx746" Nov 26 00:11:37 crc kubenswrapper[4875]: I1126 00:11:37.536138 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="50ff6e6d-830c-40e0-9674-c3795df3eead" path="/var/lib/kubelet/pods/50ff6e6d-830c-40e0-9674-c3795df3eead/volumes" Nov 26 00:11:37 crc kubenswrapper[4875]: I1126 00:11:37.536809 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="db812bf0-1b40-4520-8c30-0267012a87dc" path="/var/lib/kubelet/pods/db812bf0-1b40-4520-8c30-0267012a87dc/volumes" Nov 26 00:11:39 crc kubenswrapper[4875]: I1126 00:11:39.593679 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wx746"] Nov 26 00:11:39 crc kubenswrapper[4875]: I1126 00:11:39.594524 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-wx746" podUID="8bd0220e-3135-4767-b60b-e425535026fc" containerName="registry-server" containerID="cri-o://b98fda544a8ec27da75c17a9a9f70e83df5f74f101c5cf5874ee67909f3d50bf" gracePeriod=2 Nov 26 00:11:40 crc kubenswrapper[4875]: I1126 00:11:40.962243 4875 generic.go:334] "Generic (PLEG): container finished" podID="8bd0220e-3135-4767-b60b-e425535026fc" containerID="b98fda544a8ec27da75c17a9a9f70e83df5f74f101c5cf5874ee67909f3d50bf" exitCode=0 Nov 26 00:11:40 crc kubenswrapper[4875]: I1126 00:11:40.962286 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wx746" event={"ID":"8bd0220e-3135-4767-b60b-e425535026fc","Type":"ContainerDied","Data":"b98fda544a8ec27da75c17a9a9f70e83df5f74f101c5cf5874ee67909f3d50bf"} Nov 26 00:11:41 crc kubenswrapper[4875]: I1126 00:11:41.224194 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-wx746" Nov 26 00:11:41 crc kubenswrapper[4875]: I1126 00:11:41.258685 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8bd0220e-3135-4767-b60b-e425535026fc-catalog-content\") pod \"8bd0220e-3135-4767-b60b-e425535026fc\" (UID: \"8bd0220e-3135-4767-b60b-e425535026fc\") " Nov 26 00:11:41 crc kubenswrapper[4875]: I1126 00:11:41.259069 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-rqbvh\" (UniqueName: \"kubernetes.io/projected/8bd0220e-3135-4767-b60b-e425535026fc-kube-api-access-rqbvh\") pod \"8bd0220e-3135-4767-b60b-e425535026fc\" (UID: \"8bd0220e-3135-4767-b60b-e425535026fc\") " Nov 26 00:11:41 crc kubenswrapper[4875]: I1126 00:11:41.259141 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8bd0220e-3135-4767-b60b-e425535026fc-utilities\") pod \"8bd0220e-3135-4767-b60b-e425535026fc\" (UID: \"8bd0220e-3135-4767-b60b-e425535026fc\") " Nov 26 00:11:41 crc kubenswrapper[4875]: I1126 00:11:41.260240 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8bd0220e-3135-4767-b60b-e425535026fc-utilities" (OuterVolumeSpecName: "utilities") pod "8bd0220e-3135-4767-b60b-e425535026fc" (UID: "8bd0220e-3135-4767-b60b-e425535026fc"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:11:41 crc kubenswrapper[4875]: I1126 00:11:41.280487 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8bd0220e-3135-4767-b60b-e425535026fc-kube-api-access-rqbvh" (OuterVolumeSpecName: "kube-api-access-rqbvh") pod "8bd0220e-3135-4767-b60b-e425535026fc" (UID: "8bd0220e-3135-4767-b60b-e425535026fc"). InnerVolumeSpecName "kube-api-access-rqbvh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:11:41 crc kubenswrapper[4875]: I1126 00:11:41.318230 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8bd0220e-3135-4767-b60b-e425535026fc-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8bd0220e-3135-4767-b60b-e425535026fc" (UID: "8bd0220e-3135-4767-b60b-e425535026fc"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:11:41 crc kubenswrapper[4875]: I1126 00:11:41.360157 4875 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8bd0220e-3135-4767-b60b-e425535026fc-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 00:11:41 crc kubenswrapper[4875]: I1126 00:11:41.360203 4875 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8bd0220e-3135-4767-b60b-e425535026fc-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 00:11:41 crc kubenswrapper[4875]: I1126 00:11:41.360218 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-rqbvh\" (UniqueName: \"kubernetes.io/projected/8bd0220e-3135-4767-b60b-e425535026fc-kube-api-access-rqbvh\") on node \"crc\" DevicePath \"\"" Nov 26 00:11:41 crc kubenswrapper[4875]: I1126 00:11:41.969529 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-wx746" event={"ID":"8bd0220e-3135-4767-b60b-e425535026fc","Type":"ContainerDied","Data":"9db5d3d36b58426d7f0aa69ad9e96bc07cd4fb78f58c357ceff38b22fe1236b6"} Nov 26 00:11:41 crc kubenswrapper[4875]: I1126 00:11:41.969574 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-wx746" Nov 26 00:11:41 crc kubenswrapper[4875]: I1126 00:11:41.969590 4875 scope.go:117] "RemoveContainer" containerID="b98fda544a8ec27da75c17a9a9f70e83df5f74f101c5cf5874ee67909f3d50bf" Nov 26 00:11:41 crc kubenswrapper[4875]: I1126 00:11:41.984693 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-wx746"] Nov 26 00:11:41 crc kubenswrapper[4875]: I1126 00:11:41.986808 4875 scope.go:117] "RemoveContainer" containerID="a2f925fd829b8c6996ef9793da98b3ecedccc29b824806e26fcd91db164726ba" Nov 26 00:11:41 crc kubenswrapper[4875]: I1126 00:11:41.989529 4875 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-wx746"] Nov 26 00:11:42 crc kubenswrapper[4875]: I1126 00:11:42.000396 4875 scope.go:117] "RemoveContainer" containerID="dc3b971584b5d912071714f4e3a25147d3e2896be4ef74b690222dea3024662b" Nov 26 00:11:43 crc kubenswrapper[4875]: I1126 00:11:43.536085 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8bd0220e-3135-4767-b60b-e425535026fc" path="/var/lib/kubelet/pods/8bd0220e-3135-4767-b60b-e425535026fc/volumes" Nov 26 00:11:49 crc kubenswrapper[4875]: I1126 00:11:49.974999 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-v9znd"] Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.000242 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" podUID="0a3941af-01d7-44c0-8e51-cc719e7f4835" containerName="oauth-openshift" containerID="cri-o://af7676a12e680000a4bbfb6416b9279cf7d15c777d173b8abd6af2f459645244" gracePeriod=15 Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.135806 4875 generic.go:334] "Generic (PLEG): container finished" podID="0a3941af-01d7-44c0-8e51-cc719e7f4835" containerID="af7676a12e680000a4bbfb6416b9279cf7d15c777d173b8abd6af2f459645244" exitCode=0 Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.135853 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" 
event={"ID":"0a3941af-01d7-44c0-8e51-cc719e7f4835","Type":"ContainerDied","Data":"af7676a12e680000a4bbfb6416b9279cf7d15c777d173b8abd6af2f459645244"} Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.324579 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.358555 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-authentication/oauth-openshift-665b5dc687-gh6pc"] Nov 26 00:12:15 crc kubenswrapper[4875]: E1126 00:12:15.358858 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3db2a450-bce1-422c-bbf4-8676b8662c2e" containerName="registry-server" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.358918 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="3db2a450-bce1-422c-bbf4-8676b8662c2e" containerName="registry-server" Nov 26 00:12:15 crc kubenswrapper[4875]: E1126 00:12:15.358934 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="601c8445-3908-4677-9df9-422f8e97d928" containerName="collect-profiles" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.358946 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="601c8445-3908-4677-9df9-422f8e97d928" containerName="collect-profiles" Nov 26 00:12:15 crc kubenswrapper[4875]: E1126 00:12:15.359077 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0a3941af-01d7-44c0-8e51-cc719e7f4835" containerName="oauth-openshift" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.359118 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="0a3941af-01d7-44c0-8e51-cc719e7f4835" containerName="oauth-openshift" Nov 26 00:12:15 crc kubenswrapper[4875]: E1126 00:12:15.359138 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8bd0220e-3135-4767-b60b-e425535026fc" containerName="registry-server" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.359148 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="8bd0220e-3135-4767-b60b-e425535026fc" containerName="registry-server" Nov 26 00:12:15 crc kubenswrapper[4875]: E1126 00:12:15.359161 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db812bf0-1b40-4520-8c30-0267012a87dc" containerName="extract-content" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.359171 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="db812bf0-1b40-4520-8c30-0267012a87dc" containerName="extract-content" Nov 26 00:12:15 crc kubenswrapper[4875]: E1126 00:12:15.359186 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db812bf0-1b40-4520-8c30-0267012a87dc" containerName="registry-server" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.359197 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="db812bf0-1b40-4520-8c30-0267012a87dc" containerName="registry-server" Nov 26 00:12:15 crc kubenswrapper[4875]: E1126 00:12:15.359215 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8bd0220e-3135-4767-b60b-e425535026fc" containerName="extract-utilities" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.359227 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="8bd0220e-3135-4767-b60b-e425535026fc" containerName="extract-utilities" Nov 26 00:12:15 crc kubenswrapper[4875]: E1126 00:12:15.359240 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50ff6e6d-830c-40e0-9674-c3795df3eead" containerName="extract-content" Nov 26 00:12:15 crc 
kubenswrapper[4875]: I1126 00:12:15.359250 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="50ff6e6d-830c-40e0-9674-c3795df3eead" containerName="extract-content" Nov 26 00:12:15 crc kubenswrapper[4875]: E1126 00:12:15.359267 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3db2a450-bce1-422c-bbf4-8676b8662c2e" containerName="extract-utilities" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.359278 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="3db2a450-bce1-422c-bbf4-8676b8662c2e" containerName="extract-utilities" Nov 26 00:12:15 crc kubenswrapper[4875]: E1126 00:12:15.359293 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50ff6e6d-830c-40e0-9674-c3795df3eead" containerName="extract-utilities" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.359307 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="50ff6e6d-830c-40e0-9674-c3795df3eead" containerName="extract-utilities" Nov 26 00:12:15 crc kubenswrapper[4875]: E1126 00:12:15.359329 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="db812bf0-1b40-4520-8c30-0267012a87dc" containerName="extract-utilities" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.359341 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="db812bf0-1b40-4520-8c30-0267012a87dc" containerName="extract-utilities" Nov 26 00:12:15 crc kubenswrapper[4875]: E1126 00:12:15.359359 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8bd0220e-3135-4767-b60b-e425535026fc" containerName="extract-content" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.359371 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="8bd0220e-3135-4767-b60b-e425535026fc" containerName="extract-content" Nov 26 00:12:15 crc kubenswrapper[4875]: E1126 00:12:15.359388 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="55a68700-c0f7-4ae5-a252-013c8fbab545" containerName="image-pruner" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.359400 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="55a68700-c0f7-4ae5-a252-013c8fbab545" containerName="image-pruner" Nov 26 00:12:15 crc kubenswrapper[4875]: E1126 00:12:15.359436 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="50ff6e6d-830c-40e0-9674-c3795df3eead" containerName="registry-server" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.359450 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="50ff6e6d-830c-40e0-9674-c3795df3eead" containerName="registry-server" Nov 26 00:12:15 crc kubenswrapper[4875]: E1126 00:12:15.359465 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3db2a450-bce1-422c-bbf4-8676b8662c2e" containerName="extract-content" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.359514 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="3db2a450-bce1-422c-bbf4-8676b8662c2e" containerName="extract-content" Nov 26 00:12:15 crc kubenswrapper[4875]: E1126 00:12:15.359531 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d15feed8-dd77-4a82-ab54-3e64f49db01e" containerName="pruner" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.359612 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="d15feed8-dd77-4a82-ab54-3e64f49db01e" containerName="pruner" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.359849 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="50ff6e6d-830c-40e0-9674-c3795df3eead" containerName="registry-server" Nov 26 00:12:15 
crc kubenswrapper[4875]: I1126 00:12:15.359872 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="601c8445-3908-4677-9df9-422f8e97d928" containerName="collect-profiles" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.359886 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="db812bf0-1b40-4520-8c30-0267012a87dc" containerName="registry-server" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.359906 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="0a3941af-01d7-44c0-8e51-cc719e7f4835" containerName="oauth-openshift" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.359953 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="3db2a450-bce1-422c-bbf4-8676b8662c2e" containerName="registry-server" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.359972 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="d15feed8-dd77-4a82-ab54-3e64f49db01e" containerName="pruner" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.359986 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="55a68700-c0f7-4ae5-a252-013c8fbab545" containerName="image-pruner" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.359999 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="8bd0220e-3135-4767-b60b-e425535026fc" containerName="registry-server" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.360791 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.389112 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-665b5dc687-gh6pc"] Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.432952 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-system-trusted-ca-bundle\") pod \"0a3941af-01d7-44c0-8e51-cc719e7f4835\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.433026 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f4dwr\" (UniqueName: \"kubernetes.io/projected/0a3941af-01d7-44c0-8e51-cc719e7f4835-kube-api-access-f4dwr\") pod \"0a3941af-01d7-44c0-8e51-cc719e7f4835\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.433054 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/0a3941af-01d7-44c0-8e51-cc719e7f4835-audit-dir\") pod \"0a3941af-01d7-44c0-8e51-cc719e7f4835\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.433075 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-system-router-certs\") pod \"0a3941af-01d7-44c0-8e51-cc719e7f4835\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.433105 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: 
\"kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-system-serving-cert\") pod \"0a3941af-01d7-44c0-8e51-cc719e7f4835\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.433126 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-user-template-provider-selection\") pod \"0a3941af-01d7-44c0-8e51-cc719e7f4835\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.433154 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-system-session\") pod \"0a3941af-01d7-44c0-8e51-cc719e7f4835\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.433202 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-user-template-login\") pod \"0a3941af-01d7-44c0-8e51-cc719e7f4835\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.433237 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-user-idp-0-file-data\") pod \"0a3941af-01d7-44c0-8e51-cc719e7f4835\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.433286 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-system-cliconfig\") pod \"0a3941af-01d7-44c0-8e51-cc719e7f4835\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.433308 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-user-template-error\") pod \"0a3941af-01d7-44c0-8e51-cc719e7f4835\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.433337 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/0a3941af-01d7-44c0-8e51-cc719e7f4835-audit-policies\") pod \"0a3941af-01d7-44c0-8e51-cc719e7f4835\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.433367 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-system-ocp-branding-template\") pod \"0a3941af-01d7-44c0-8e51-cc719e7f4835\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.433394 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: 
\"kubernetes.io/configmap/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-system-service-ca\") pod \"0a3941af-01d7-44c0-8e51-cc719e7f4835\" (UID: \"0a3941af-01d7-44c0-8e51-cc719e7f4835\") " Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.433143 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/0a3941af-01d7-44c0-8e51-cc719e7f4835-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "0a3941af-01d7-44c0-8e51-cc719e7f4835" (UID: "0a3941af-01d7-44c0-8e51-cc719e7f4835"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.434033 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-system-service-ca" (OuterVolumeSpecName: "v4-0-config-system-service-ca") pod "0a3941af-01d7-44c0-8e51-cc719e7f4835" (UID: "0a3941af-01d7-44c0-8e51-cc719e7f4835"). InnerVolumeSpecName "v4-0-config-system-service-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.434047 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-system-trusted-ca-bundle" (OuterVolumeSpecName: "v4-0-config-system-trusted-ca-bundle") pod "0a3941af-01d7-44c0-8e51-cc719e7f4835" (UID: "0a3941af-01d7-44c0-8e51-cc719e7f4835"). InnerVolumeSpecName "v4-0-config-system-trusted-ca-bundle". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.434603 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-system-cliconfig" (OuterVolumeSpecName: "v4-0-config-system-cliconfig") pod "0a3941af-01d7-44c0-8e51-cc719e7f4835" (UID: "0a3941af-01d7-44c0-8e51-cc719e7f4835"). InnerVolumeSpecName "v4-0-config-system-cliconfig". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.434714 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/0a3941af-01d7-44c0-8e51-cc719e7f4835-audit-policies" (OuterVolumeSpecName: "audit-policies") pod "0a3941af-01d7-44c0-8e51-cc719e7f4835" (UID: "0a3941af-01d7-44c0-8e51-cc719e7f4835"). InnerVolumeSpecName "audit-policies". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.445858 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-user-template-login" (OuterVolumeSpecName: "v4-0-config-user-template-login") pod "0a3941af-01d7-44c0-8e51-cc719e7f4835" (UID: "0a3941af-01d7-44c0-8e51-cc719e7f4835"). InnerVolumeSpecName "v4-0-config-user-template-login". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.446275 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-user-template-provider-selection" (OuterVolumeSpecName: "v4-0-config-user-template-provider-selection") pod "0a3941af-01d7-44c0-8e51-cc719e7f4835" (UID: "0a3941af-01d7-44c0-8e51-cc719e7f4835"). InnerVolumeSpecName "v4-0-config-user-template-provider-selection". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.447788 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-system-router-certs" (OuterVolumeSpecName: "v4-0-config-system-router-certs") pod "0a3941af-01d7-44c0-8e51-cc719e7f4835" (UID: "0a3941af-01d7-44c0-8e51-cc719e7f4835"). InnerVolumeSpecName "v4-0-config-system-router-certs". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.448113 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-system-ocp-branding-template" (OuterVolumeSpecName: "v4-0-config-system-ocp-branding-template") pod "0a3941af-01d7-44c0-8e51-cc719e7f4835" (UID: "0a3941af-01d7-44c0-8e51-cc719e7f4835"). InnerVolumeSpecName "v4-0-config-system-ocp-branding-template". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.448879 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0a3941af-01d7-44c0-8e51-cc719e7f4835-kube-api-access-f4dwr" (OuterVolumeSpecName: "kube-api-access-f4dwr") pod "0a3941af-01d7-44c0-8e51-cc719e7f4835" (UID: "0a3941af-01d7-44c0-8e51-cc719e7f4835"). InnerVolumeSpecName "kube-api-access-f4dwr". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.450937 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-system-serving-cert" (OuterVolumeSpecName: "v4-0-config-system-serving-cert") pod "0a3941af-01d7-44c0-8e51-cc719e7f4835" (UID: "0a3941af-01d7-44c0-8e51-cc719e7f4835"). InnerVolumeSpecName "v4-0-config-system-serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.455535 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-system-session" (OuterVolumeSpecName: "v4-0-config-system-session") pod "0a3941af-01d7-44c0-8e51-cc719e7f4835" (UID: "0a3941af-01d7-44c0-8e51-cc719e7f4835"). InnerVolumeSpecName "v4-0-config-system-session". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.455686 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-user-idp-0-file-data" (OuterVolumeSpecName: "v4-0-config-user-idp-0-file-data") pod "0a3941af-01d7-44c0-8e51-cc719e7f4835" (UID: "0a3941af-01d7-44c0-8e51-cc719e7f4835"). InnerVolumeSpecName "v4-0-config-user-idp-0-file-data". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.456671 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-user-template-error" (OuterVolumeSpecName: "v4-0-config-user-template-error") pod "0a3941af-01d7-44c0-8e51-cc719e7f4835" (UID: "0a3941af-01d7-44c0-8e51-cc719e7f4835"). InnerVolumeSpecName "v4-0-config-user-template-error". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.534290 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-kfz76\" (UniqueName: \"kubernetes.io/projected/5c401efa-0ac7-4df1-b072-893e6d088255-kube-api-access-kfz76\") pod \"oauth-openshift-665b5dc687-gh6pc\" (UID: \"5c401efa-0ac7-4df1-b072-893e6d088255\") " pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.534340 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/5c401efa-0ac7-4df1-b072-893e6d088255-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-665b5dc687-gh6pc\" (UID: \"5c401efa-0ac7-4df1-b072-893e6d088255\") " pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.534368 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/5c401efa-0ac7-4df1-b072-893e6d088255-v4-0-config-user-template-login\") pod \"oauth-openshift-665b5dc687-gh6pc\" (UID: \"5c401efa-0ac7-4df1-b072-893e6d088255\") " pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.535109 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/5c401efa-0ac7-4df1-b072-893e6d088255-v4-0-config-system-cliconfig\") pod \"oauth-openshift-665b5dc687-gh6pc\" (UID: \"5c401efa-0ac7-4df1-b072-893e6d088255\") " pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.535164 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5c401efa-0ac7-4df1-b072-893e6d088255-audit-dir\") pod \"oauth-openshift-665b5dc687-gh6pc\" (UID: \"5c401efa-0ac7-4df1-b072-893e6d088255\") " pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.535181 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/5c401efa-0ac7-4df1-b072-893e6d088255-v4-0-config-user-template-error\") pod \"oauth-openshift-665b5dc687-gh6pc\" (UID: \"5c401efa-0ac7-4df1-b072-893e6d088255\") " pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.535207 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/5c401efa-0ac7-4df1-b072-893e6d088255-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-665b5dc687-gh6pc\" (UID: \"5c401efa-0ac7-4df1-b072-893e6d088255\") " pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.535231 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/5c401efa-0ac7-4df1-b072-893e6d088255-v4-0-config-system-session\") pod 
\"oauth-openshift-665b5dc687-gh6pc\" (UID: \"5c401efa-0ac7-4df1-b072-893e6d088255\") " pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.535254 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5c401efa-0ac7-4df1-b072-893e6d088255-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-665b5dc687-gh6pc\" (UID: \"5c401efa-0ac7-4df1-b072-893e6d088255\") " pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.535274 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/5c401efa-0ac7-4df1-b072-893e6d088255-v4-0-config-system-router-certs\") pod \"oauth-openshift-665b5dc687-gh6pc\" (UID: \"5c401efa-0ac7-4df1-b072-893e6d088255\") " pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.535292 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/5c401efa-0ac7-4df1-b072-893e6d088255-v4-0-config-system-service-ca\") pod \"oauth-openshift-665b5dc687-gh6pc\" (UID: \"5c401efa-0ac7-4df1-b072-893e6d088255\") " pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.535379 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5c401efa-0ac7-4df1-b072-893e6d088255-audit-policies\") pod \"oauth-openshift-665b5dc687-gh6pc\" (UID: \"5c401efa-0ac7-4df1-b072-893e6d088255\") " pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.535397 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/5c401efa-0ac7-4df1-b072-893e6d088255-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-665b5dc687-gh6pc\" (UID: \"5c401efa-0ac7-4df1-b072-893e6d088255\") " pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.535437 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/5c401efa-0ac7-4df1-b072-893e6d088255-v4-0-config-system-serving-cert\") pod \"oauth-openshift-665b5dc687-gh6pc\" (UID: \"5c401efa-0ac7-4df1-b072-893e6d088255\") " pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.535513 4875 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-system-cliconfig\") on node \"crc\" DevicePath \"\"" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.535527 4875 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-user-template-error\") on node \"crc\" DevicePath \"\"" Nov 26 00:12:15 crc 
kubenswrapper[4875]: I1126 00:12:15.535538 4875 reconciler_common.go:293] "Volume detached for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/0a3941af-01d7-44c0-8e51-cc719e7f4835-audit-policies\") on node \"crc\" DevicePath \"\"" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.535551 4875 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-system-ocp-branding-template\") on node \"crc\" DevicePath \"\"" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.535562 4875 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-system-service-ca\") on node \"crc\" DevicePath \"\"" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.535573 4875 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-system-trusted-ca-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.535584 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f4dwr\" (UniqueName: \"kubernetes.io/projected/0a3941af-01d7-44c0-8e51-cc719e7f4835-kube-api-access-f4dwr\") on node \"crc\" DevicePath \"\"" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.535593 4875 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/0a3941af-01d7-44c0-8e51-cc719e7f4835-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.535603 4875 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-system-router-certs\") on node \"crc\" DevicePath \"\"" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.535616 4875 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-system-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.535628 4875 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-user-template-provider-selection\") on node \"crc\" DevicePath \"\"" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.535641 4875 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-system-session\") on node \"crc\" DevicePath \"\"" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.535655 4875 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-user-template-login\") on node \"crc\" DevicePath \"\"" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.535666 4875 reconciler_common.go:293] "Volume detached for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/0a3941af-01d7-44c0-8e51-cc719e7f4835-v4-0-config-user-idp-0-file-data\") on node \"crc\" DevicePath \"\"" Nov 26 00:12:15 crc 
kubenswrapper[4875]: I1126 00:12:15.637271 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/5c401efa-0ac7-4df1-b072-893e6d088255-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-665b5dc687-gh6pc\" (UID: \"5c401efa-0ac7-4df1-b072-893e6d088255\") " pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.637336 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/5c401efa-0ac7-4df1-b072-893e6d088255-v4-0-config-system-session\") pod \"oauth-openshift-665b5dc687-gh6pc\" (UID: \"5c401efa-0ac7-4df1-b072-893e6d088255\") " pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.637355 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5c401efa-0ac7-4df1-b072-893e6d088255-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-665b5dc687-gh6pc\" (UID: \"5c401efa-0ac7-4df1-b072-893e6d088255\") " pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.637377 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-router-certs\" (UniqueName: \"kubernetes.io/secret/5c401efa-0ac7-4df1-b072-893e6d088255-v4-0-config-system-router-certs\") pod \"oauth-openshift-665b5dc687-gh6pc\" (UID: \"5c401efa-0ac7-4df1-b072-893e6d088255\") " pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.637393 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/5c401efa-0ac7-4df1-b072-893e6d088255-v4-0-config-system-service-ca\") pod \"oauth-openshift-665b5dc687-gh6pc\" (UID: \"5c401efa-0ac7-4df1-b072-893e6d088255\") " pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.637481 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5c401efa-0ac7-4df1-b072-893e6d088255-audit-policies\") pod \"oauth-openshift-665b5dc687-gh6pc\" (UID: \"5c401efa-0ac7-4df1-b072-893e6d088255\") " pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.637508 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/5c401efa-0ac7-4df1-b072-893e6d088255-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-665b5dc687-gh6pc\" (UID: \"5c401efa-0ac7-4df1-b072-893e6d088255\") " pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.637533 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/5c401efa-0ac7-4df1-b072-893e6d088255-v4-0-config-system-serving-cert\") pod \"oauth-openshift-665b5dc687-gh6pc\" (UID: \"5c401efa-0ac7-4df1-b072-893e6d088255\") " pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:15 crc 
kubenswrapper[4875]: I1126 00:12:15.637569 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-kfz76\" (UniqueName: \"kubernetes.io/projected/5c401efa-0ac7-4df1-b072-893e6d088255-kube-api-access-kfz76\") pod \"oauth-openshift-665b5dc687-gh6pc\" (UID: \"5c401efa-0ac7-4df1-b072-893e6d088255\") " pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.637606 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/5c401efa-0ac7-4df1-b072-893e6d088255-v4-0-config-user-template-login\") pod \"oauth-openshift-665b5dc687-gh6pc\" (UID: \"5c401efa-0ac7-4df1-b072-893e6d088255\") " pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.637628 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/5c401efa-0ac7-4df1-b072-893e6d088255-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-665b5dc687-gh6pc\" (UID: \"5c401efa-0ac7-4df1-b072-893e6d088255\") " pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.637670 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/5c401efa-0ac7-4df1-b072-893e6d088255-v4-0-config-system-cliconfig\") pod \"oauth-openshift-665b5dc687-gh6pc\" (UID: \"5c401efa-0ac7-4df1-b072-893e6d088255\") " pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.637783 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5c401efa-0ac7-4df1-b072-893e6d088255-audit-dir\") pod \"oauth-openshift-665b5dc687-gh6pc\" (UID: \"5c401efa-0ac7-4df1-b072-893e6d088255\") " pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.637805 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/5c401efa-0ac7-4df1-b072-893e6d088255-v4-0-config-user-template-error\") pod \"oauth-openshift-665b5dc687-gh6pc\" (UID: \"5c401efa-0ac7-4df1-b072-893e6d088255\") " pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.638296 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-service-ca\" (UniqueName: \"kubernetes.io/configmap/5c401efa-0ac7-4df1-b072-893e6d088255-v4-0-config-system-service-ca\") pod \"oauth-openshift-665b5dc687-gh6pc\" (UID: \"5c401efa-0ac7-4df1-b072-893e6d088255\") " pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.638905 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-policies\" (UniqueName: \"kubernetes.io/configmap/5c401efa-0ac7-4df1-b072-893e6d088255-audit-policies\") pod \"oauth-openshift-665b5dc687-gh6pc\" (UID: \"5c401efa-0ac7-4df1-b072-893e6d088255\") " pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.639602 4875 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"v4-0-config-system-trusted-ca-bundle\" (UniqueName: \"kubernetes.io/configmap/5c401efa-0ac7-4df1-b072-893e6d088255-v4-0-config-system-trusted-ca-bundle\") pod \"oauth-openshift-665b5dc687-gh6pc\" (UID: \"5c401efa-0ac7-4df1-b072-893e6d088255\") " pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.639978 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/5c401efa-0ac7-4df1-b072-893e6d088255-audit-dir\") pod \"oauth-openshift-665b5dc687-gh6pc\" (UID: \"5c401efa-0ac7-4df1-b072-893e6d088255\") " pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.640705 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-cliconfig\" (UniqueName: \"kubernetes.io/configmap/5c401efa-0ac7-4df1-b072-893e6d088255-v4-0-config-system-cliconfig\") pod \"oauth-openshift-665b5dc687-gh6pc\" (UID: \"5c401efa-0ac7-4df1-b072-893e6d088255\") " pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.641229 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-idp-0-file-data\" (UniqueName: \"kubernetes.io/secret/5c401efa-0ac7-4df1-b072-893e6d088255-v4-0-config-user-idp-0-file-data\") pod \"oauth-openshift-665b5dc687-gh6pc\" (UID: \"5c401efa-0ac7-4df1-b072-893e6d088255\") " pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.641992 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-error\" (UniqueName: \"kubernetes.io/secret/5c401efa-0ac7-4df1-b072-893e6d088255-v4-0-config-user-template-error\") pod \"oauth-openshift-665b5dc687-gh6pc\" (UID: \"5c401efa-0ac7-4df1-b072-893e6d088255\") " pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.643066 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-session\" (UniqueName: \"kubernetes.io/secret/5c401efa-0ac7-4df1-b072-893e6d088255-v4-0-config-system-session\") pod \"oauth-openshift-665b5dc687-gh6pc\" (UID: \"5c401efa-0ac7-4df1-b072-893e6d088255\") " pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.644002 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-provider-selection\" (UniqueName: \"kubernetes.io/secret/5c401efa-0ac7-4df1-b072-893e6d088255-v4-0-config-user-template-provider-selection\") pod \"oauth-openshift-665b5dc687-gh6pc\" (UID: \"5c401efa-0ac7-4df1-b072-893e6d088255\") " pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.644010 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-user-template-login\" (UniqueName: \"kubernetes.io/secret/5c401efa-0ac7-4df1-b072-893e6d088255-v4-0-config-user-template-login\") pod \"oauth-openshift-665b5dc687-gh6pc\" (UID: \"5c401efa-0ac7-4df1-b072-893e6d088255\") " pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.644284 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-router-certs\" 
(UniqueName: \"kubernetes.io/secret/5c401efa-0ac7-4df1-b072-893e6d088255-v4-0-config-system-router-certs\") pod \"oauth-openshift-665b5dc687-gh6pc\" (UID: \"5c401efa-0ac7-4df1-b072-893e6d088255\") " pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.644531 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-ocp-branding-template\" (UniqueName: \"kubernetes.io/secret/5c401efa-0ac7-4df1-b072-893e6d088255-v4-0-config-system-ocp-branding-template\") pod \"oauth-openshift-665b5dc687-gh6pc\" (UID: \"5c401efa-0ac7-4df1-b072-893e6d088255\") " pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.645356 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"v4-0-config-system-serving-cert\" (UniqueName: \"kubernetes.io/secret/5c401efa-0ac7-4df1-b072-893e6d088255-v4-0-config-system-serving-cert\") pod \"oauth-openshift-665b5dc687-gh6pc\" (UID: \"5c401efa-0ac7-4df1-b072-893e6d088255\") " pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.664643 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-kfz76\" (UniqueName: \"kubernetes.io/projected/5c401efa-0ac7-4df1-b072-893e6d088255-kube-api-access-kfz76\") pod \"oauth-openshift-665b5dc687-gh6pc\" (UID: \"5c401efa-0ac7-4df1-b072-893e6d088255\") " pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.694700 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:15 crc kubenswrapper[4875]: I1126 00:12:15.912535 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-authentication/oauth-openshift-665b5dc687-gh6pc"] Nov 26 00:12:16 crc kubenswrapper[4875]: I1126 00:12:16.142384 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" event={"ID":"5c401efa-0ac7-4df1-b072-893e6d088255","Type":"ContainerStarted","Data":"ecad29d948f6e15a2f7124e36ec6ad474af51648f054ab542de2c7de89ef7bf0"} Nov 26 00:12:16 crc kubenswrapper[4875]: I1126 00:12:16.144823 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" event={"ID":"0a3941af-01d7-44c0-8e51-cc719e7f4835","Type":"ContainerDied","Data":"0bb75f288c5837dc214b487a88e223004e96f1ca1a50b783a71a6d9448f5c75d"} Nov 26 00:12:16 crc kubenswrapper[4875]: I1126 00:12:16.144931 4875 scope.go:117] "RemoveContainer" containerID="af7676a12e680000a4bbfb6416b9279cf7d15c777d173b8abd6af2f459645244" Nov 26 00:12:16 crc kubenswrapper[4875]: I1126 00:12:16.144935 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-authentication/oauth-openshift-558db77b4-v9znd" Nov 26 00:12:16 crc kubenswrapper[4875]: I1126 00:12:16.165879 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-v9znd"] Nov 26 00:12:16 crc kubenswrapper[4875]: I1126 00:12:16.168449 4875 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-authentication/oauth-openshift-558db77b4-v9znd"] Nov 26 00:12:17 crc kubenswrapper[4875]: I1126 00:12:17.152426 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" event={"ID":"5c401efa-0ac7-4df1-b072-893e6d088255","Type":"ContainerStarted","Data":"b02c0a15b952be8cf0fec1cd796affe25465860da1bb9829104529b0f522225d"} Nov 26 00:12:17 crc kubenswrapper[4875]: I1126 00:12:17.152752 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:17 crc kubenswrapper[4875]: I1126 00:12:17.159277 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" Nov 26 00:12:17 crc kubenswrapper[4875]: I1126 00:12:17.172939 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-authentication/oauth-openshift-665b5dc687-gh6pc" podStartSLOduration=27.172921667 podStartE2EDuration="27.172921667s" podCreationTimestamp="2025-11-26 00:11:50 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:12:17.169850082 +0000 UTC m=+310.359825807" watchObservedRunningTime="2025-11-26 00:12:17.172921667 +0000 UTC m=+310.362897352" Nov 26 00:12:17 crc kubenswrapper[4875]: I1126 00:12:17.534582 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0a3941af-01d7-44c0-8e51-cc719e7f4835" path="/var/lib/kubelet/pods/0a3941af-01d7-44c0-8e51-cc719e7f4835/volumes" Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.055210 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-sgwj7"] Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.057646 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-sgwj7" podUID="b9a62657-d64a-4306-ba77-17417763f811" containerName="registry-server" containerID="cri-o://d59a2c580620323609b46be216adb653f119352da9aec2af3a895279a9690934" gracePeriod=30 Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.065494 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-b4kmf"] Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.065807 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-b4kmf" podUID="830b0136-66d9-47cb-9a99-f858deae4a28" containerName="registry-server" containerID="cri-o://7e4e0cbe2f33ce20dd719319fc7186faae0b0cf00b67753c0a4fb0c3bfd56de5" gracePeriod=30 Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.077616 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-6ml5v"] Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.078098 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/marketplace-operator-79b997595-6ml5v" podUID="d725db0e-68dc-4655-ae0f-8e847ef651c3" 
containerName="marketplace-operator" containerID="cri-o://3750e44f310d5540d69e1a766085b11f8211445703ac851ff330ac7ec2368c45" gracePeriod=30 Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.088811 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-425kq"] Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.091197 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-425kq" podUID="9e709507-2042-4993-a404-c5d8749b0da7" containerName="registry-server" containerID="cri-o://5e57024b97d3b0000c1a8e26536d619a6569a9495c9dfe5216fe3a4fd600d49e" gracePeriod=30 Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.092542 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-jltf2"] Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.092882 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-jltf2" podUID="a2335e94-5d68-4b3e-a13b-37296965798d" containerName="registry-server" containerID="cri-o://7a1e8eb4d0a22760d011ec88c591c730f48cda76eb9e4c090d0c1559dead344a" gracePeriod=30 Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.103325 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-bbdxg"] Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.104163 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-bbdxg" Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.122124 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-bbdxg"] Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.137446 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/ac9cfb7a-c7d1-45ae-8ce1-7613fc318ec1-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-bbdxg\" (UID: \"ac9cfb7a-c7d1-45ae-8ce1-7613fc318ec1\") " pod="openshift-marketplace/marketplace-operator-79b997595-bbdxg" Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.137511 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ac9cfb7a-c7d1-45ae-8ce1-7613fc318ec1-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-bbdxg\" (UID: \"ac9cfb7a-c7d1-45ae-8ce1-7613fc318ec1\") " pod="openshift-marketplace/marketplace-operator-79b997595-bbdxg" Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.137529 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4ftwq\" (UniqueName: \"kubernetes.io/projected/ac9cfb7a-c7d1-45ae-8ce1-7613fc318ec1-kube-api-access-4ftwq\") pod \"marketplace-operator-79b997595-bbdxg\" (UID: \"ac9cfb7a-c7d1-45ae-8ce1-7613fc318ec1\") " pod="openshift-marketplace/marketplace-operator-79b997595-bbdxg" Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.224402 4875 generic.go:334] "Generic (PLEG): container finished" podID="d725db0e-68dc-4655-ae0f-8e847ef651c3" containerID="3750e44f310d5540d69e1a766085b11f8211445703ac851ff330ac7ec2368c45" exitCode=0 Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.224539 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/marketplace-operator-79b997595-6ml5v" event={"ID":"d725db0e-68dc-4655-ae0f-8e847ef651c3","Type":"ContainerDied","Data":"3750e44f310d5540d69e1a766085b11f8211445703ac851ff330ac7ec2368c45"} Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.228018 4875 generic.go:334] "Generic (PLEG): container finished" podID="830b0136-66d9-47cb-9a99-f858deae4a28" containerID="7e4e0cbe2f33ce20dd719319fc7186faae0b0cf00b67753c0a4fb0c3bfd56de5" exitCode=0 Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.228915 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b4kmf" event={"ID":"830b0136-66d9-47cb-9a99-f858deae4a28","Type":"ContainerDied","Data":"7e4e0cbe2f33ce20dd719319fc7186faae0b0cf00b67753c0a4fb0c3bfd56de5"} Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.232563 4875 generic.go:334] "Generic (PLEG): container finished" podID="b9a62657-d64a-4306-ba77-17417763f811" containerID="d59a2c580620323609b46be216adb653f119352da9aec2af3a895279a9690934" exitCode=0 Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.232635 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sgwj7" event={"ID":"b9a62657-d64a-4306-ba77-17417763f811","Type":"ContainerDied","Data":"d59a2c580620323609b46be216adb653f119352da9aec2af3a895279a9690934"} Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.235255 4875 generic.go:334] "Generic (PLEG): container finished" podID="9e709507-2042-4993-a404-c5d8749b0da7" containerID="5e57024b97d3b0000c1a8e26536d619a6569a9495c9dfe5216fe3a4fd600d49e" exitCode=0 Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.235336 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-425kq" event={"ID":"9e709507-2042-4993-a404-c5d8749b0da7","Type":"ContainerDied","Data":"5e57024b97d3b0000c1a8e26536d619a6569a9495c9dfe5216fe3a4fd600d49e"} Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.238533 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/ac9cfb7a-c7d1-45ae-8ce1-7613fc318ec1-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-bbdxg\" (UID: \"ac9cfb7a-c7d1-45ae-8ce1-7613fc318ec1\") " pod="openshift-marketplace/marketplace-operator-79b997595-bbdxg" Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.238589 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ac9cfb7a-c7d1-45ae-8ce1-7613fc318ec1-marketplace-trusted-ca\") pod \"marketplace-operator-79b997595-bbdxg\" (UID: \"ac9cfb7a-c7d1-45ae-8ce1-7613fc318ec1\") " pod="openshift-marketplace/marketplace-operator-79b997595-bbdxg" Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.238608 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4ftwq\" (UniqueName: \"kubernetes.io/projected/ac9cfb7a-c7d1-45ae-8ce1-7613fc318ec1-kube-api-access-4ftwq\") pod \"marketplace-operator-79b997595-bbdxg\" (UID: \"ac9cfb7a-c7d1-45ae-8ce1-7613fc318ec1\") " pod="openshift-marketplace/marketplace-operator-79b997595-bbdxg" Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.242608 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ac9cfb7a-c7d1-45ae-8ce1-7613fc318ec1-marketplace-trusted-ca\") pod 
\"marketplace-operator-79b997595-bbdxg\" (UID: \"ac9cfb7a-c7d1-45ae-8ce1-7613fc318ec1\") " pod="openshift-marketplace/marketplace-operator-79b997595-bbdxg" Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.243828 4875 generic.go:334] "Generic (PLEG): container finished" podID="a2335e94-5d68-4b3e-a13b-37296965798d" containerID="7a1e8eb4d0a22760d011ec88c591c730f48cda76eb9e4c090d0c1559dead344a" exitCode=0 Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.244042 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jltf2" event={"ID":"a2335e94-5d68-4b3e-a13b-37296965798d","Type":"ContainerDied","Data":"7a1e8eb4d0a22760d011ec88c591c730f48cda76eb9e4c090d0c1559dead344a"} Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.246543 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/ac9cfb7a-c7d1-45ae-8ce1-7613fc318ec1-marketplace-operator-metrics\") pod \"marketplace-operator-79b997595-bbdxg\" (UID: \"ac9cfb7a-c7d1-45ae-8ce1-7613fc318ec1\") " pod="openshift-marketplace/marketplace-operator-79b997595-bbdxg" Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.256914 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4ftwq\" (UniqueName: \"kubernetes.io/projected/ac9cfb7a-c7d1-45ae-8ce1-7613fc318ec1-kube-api-access-4ftwq\") pod \"marketplace-operator-79b997595-bbdxg\" (UID: \"ac9cfb7a-c7d1-45ae-8ce1-7613fc318ec1\") " pod="openshift-marketplace/marketplace-operator-79b997595-bbdxg" Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.469264 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-bbdxg" Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.492010 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sgwj7" Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.493971 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-425kq" Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.500474 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-6ml5v" Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.512114 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-b4kmf" Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.548163 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-dhv88\" (UniqueName: \"kubernetes.io/projected/9e709507-2042-4993-a404-c5d8749b0da7-kube-api-access-dhv88\") pod \"9e709507-2042-4993-a404-c5d8749b0da7\" (UID: \"9e709507-2042-4993-a404-c5d8749b0da7\") " Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.550998 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/830b0136-66d9-47cb-9a99-f858deae4a28-catalog-content\") pod \"830b0136-66d9-47cb-9a99-f858deae4a28\" (UID: \"830b0136-66d9-47cb-9a99-f858deae4a28\") " Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.551039 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b9a62657-d64a-4306-ba77-17417763f811-catalog-content\") pod \"b9a62657-d64a-4306-ba77-17417763f811\" (UID: \"b9a62657-d64a-4306-ba77-17417763f811\") " Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.551062 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e709507-2042-4993-a404-c5d8749b0da7-catalog-content\") pod \"9e709507-2042-4993-a404-c5d8749b0da7\" (UID: \"9e709507-2042-4993-a404-c5d8749b0da7\") " Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.555515 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jltf2" Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.556235 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9e709507-2042-4993-a404-c5d8749b0da7-kube-api-access-dhv88" (OuterVolumeSpecName: "kube-api-access-dhv88") pod "9e709507-2042-4993-a404-c5d8749b0da7" (UID: "9e709507-2042-4993-a404-c5d8749b0da7"). InnerVolumeSpecName "kube-api-access-dhv88". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.578119 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-dhv88\" (UniqueName: \"kubernetes.io/projected/9e709507-2042-4993-a404-c5d8749b0da7-kube-api-access-dhv88\") on node \"crc\" DevicePath \"\"" Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.587261 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9e709507-2042-4993-a404-c5d8749b0da7-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "9e709507-2042-4993-a404-c5d8749b0da7" (UID: "9e709507-2042-4993-a404-c5d8749b0da7"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.619001 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/830b0136-66d9-47cb-9a99-f858deae4a28-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "830b0136-66d9-47cb-9a99-f858deae4a28" (UID: "830b0136-66d9-47cb-9a99-f858deae4a28"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.640552 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b9a62657-d64a-4306-ba77-17417763f811-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b9a62657-d64a-4306-ba77-17417763f811" (UID: "b9a62657-d64a-4306-ba77-17417763f811"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.679097 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-bbdxg"] Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.679608 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d725db0e-68dc-4655-ae0f-8e847ef651c3-marketplace-trusted-ca\") pod \"d725db0e-68dc-4655-ae0f-8e847ef651c3\" (UID: \"d725db0e-68dc-4655-ae0f-8e847ef651c3\") " Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.679647 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mcrm2\" (UniqueName: \"kubernetes.io/projected/830b0136-66d9-47cb-9a99-f858deae4a28-kube-api-access-mcrm2\") pod \"830b0136-66d9-47cb-9a99-f858deae4a28\" (UID: \"830b0136-66d9-47cb-9a99-f858deae4a28\") " Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.679675 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e709507-2042-4993-a404-c5d8749b0da7-utilities\") pod \"9e709507-2042-4993-a404-c5d8749b0da7\" (UID: \"9e709507-2042-4993-a404-c5d8749b0da7\") " Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.679697 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b9a62657-d64a-4306-ba77-17417763f811-utilities\") pod \"b9a62657-d64a-4306-ba77-17417763f811\" (UID: \"b9a62657-d64a-4306-ba77-17417763f811\") " Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.679713 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2335e94-5d68-4b3e-a13b-37296965798d-utilities\") pod \"a2335e94-5d68-4b3e-a13b-37296965798d\" (UID: \"a2335e94-5d68-4b3e-a13b-37296965798d\") " Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.679757 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tv45p\" (UniqueName: \"kubernetes.io/projected/d725db0e-68dc-4655-ae0f-8e847ef651c3-kube-api-access-tv45p\") pod \"d725db0e-68dc-4655-ae0f-8e847ef651c3\" (UID: \"d725db0e-68dc-4655-ae0f-8e847ef651c3\") " Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.679773 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2335e94-5d68-4b3e-a13b-37296965798d-catalog-content\") pod \"a2335e94-5d68-4b3e-a13b-37296965798d\" (UID: \"a2335e94-5d68-4b3e-a13b-37296965798d\") " Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.679792 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/830b0136-66d9-47cb-9a99-f858deae4a28-utilities\") pod \"830b0136-66d9-47cb-9a99-f858deae4a28\" (UID: \"830b0136-66d9-47cb-9a99-f858deae4a28\") " Nov 26 00:12:31 crc 
kubenswrapper[4875]: I1126 00:12:31.679817 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-hnlxn\" (UniqueName: \"kubernetes.io/projected/a2335e94-5d68-4b3e-a13b-37296965798d-kube-api-access-hnlxn\") pod \"a2335e94-5d68-4b3e-a13b-37296965798d\" (UID: \"a2335e94-5d68-4b3e-a13b-37296965798d\") " Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.679845 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/d725db0e-68dc-4655-ae0f-8e847ef651c3-marketplace-operator-metrics\") pod \"d725db0e-68dc-4655-ae0f-8e847ef651c3\" (UID: \"d725db0e-68dc-4655-ae0f-8e847ef651c3\") " Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.679867 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-r9ld4\" (UniqueName: \"kubernetes.io/projected/b9a62657-d64a-4306-ba77-17417763f811-kube-api-access-r9ld4\") pod \"b9a62657-d64a-4306-ba77-17417763f811\" (UID: \"b9a62657-d64a-4306-ba77-17417763f811\") " Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.680085 4875 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/830b0136-66d9-47cb-9a99-f858deae4a28-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.680096 4875 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b9a62657-d64a-4306-ba77-17417763f811-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.680106 4875 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/9e709507-2042-4993-a404-c5d8749b0da7-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.680605 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/d725db0e-68dc-4655-ae0f-8e847ef651c3-marketplace-trusted-ca" (OuterVolumeSpecName: "marketplace-trusted-ca") pod "d725db0e-68dc-4655-ae0f-8e847ef651c3" (UID: "d725db0e-68dc-4655-ae0f-8e847ef651c3"). InnerVolumeSpecName "marketplace-trusted-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.682975 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9e709507-2042-4993-a404-c5d8749b0da7-utilities" (OuterVolumeSpecName: "utilities") pod "9e709507-2042-4993-a404-c5d8749b0da7" (UID: "9e709507-2042-4993-a404-c5d8749b0da7"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.683217 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b9a62657-d64a-4306-ba77-17417763f811-utilities" (OuterVolumeSpecName: "utilities") pod "b9a62657-d64a-4306-ba77-17417763f811" (UID: "b9a62657-d64a-4306-ba77-17417763f811"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.683603 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a2335e94-5d68-4b3e-a13b-37296965798d-utilities" (OuterVolumeSpecName: "utilities") pod "a2335e94-5d68-4b3e-a13b-37296965798d" (UID: "a2335e94-5d68-4b3e-a13b-37296965798d"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.683798 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9a62657-d64a-4306-ba77-17417763f811-kube-api-access-r9ld4" (OuterVolumeSpecName: "kube-api-access-r9ld4") pod "b9a62657-d64a-4306-ba77-17417763f811" (UID: "b9a62657-d64a-4306-ba77-17417763f811"). InnerVolumeSpecName "kube-api-access-r9ld4". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.683850 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/830b0136-66d9-47cb-9a99-f858deae4a28-utilities" (OuterVolumeSpecName: "utilities") pod "830b0136-66d9-47cb-9a99-f858deae4a28" (UID: "830b0136-66d9-47cb-9a99-f858deae4a28"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.686614 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a2335e94-5d68-4b3e-a13b-37296965798d-kube-api-access-hnlxn" (OuterVolumeSpecName: "kube-api-access-hnlxn") pod "a2335e94-5d68-4b3e-a13b-37296965798d" (UID: "a2335e94-5d68-4b3e-a13b-37296965798d"). InnerVolumeSpecName "kube-api-access-hnlxn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.686631 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/830b0136-66d9-47cb-9a99-f858deae4a28-kube-api-access-mcrm2" (OuterVolumeSpecName: "kube-api-access-mcrm2") pod "830b0136-66d9-47cb-9a99-f858deae4a28" (UID: "830b0136-66d9-47cb-9a99-f858deae4a28"). InnerVolumeSpecName "kube-api-access-mcrm2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.686850 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/d725db0e-68dc-4655-ae0f-8e847ef651c3-marketplace-operator-metrics" (OuterVolumeSpecName: "marketplace-operator-metrics") pod "d725db0e-68dc-4655-ae0f-8e847ef651c3" (UID: "d725db0e-68dc-4655-ae0f-8e847ef651c3"). InnerVolumeSpecName "marketplace-operator-metrics". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.686963 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/d725db0e-68dc-4655-ae0f-8e847ef651c3-kube-api-access-tv45p" (OuterVolumeSpecName: "kube-api-access-tv45p") pod "d725db0e-68dc-4655-ae0f-8e847ef651c3" (UID: "d725db0e-68dc-4655-ae0f-8e847ef651c3"). InnerVolumeSpecName "kube-api-access-tv45p". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:12:31 crc kubenswrapper[4875]: W1126 00:12:31.690342 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podac9cfb7a_c7d1_45ae_8ce1_7613fc318ec1.slice/crio-3629a5a1bb2571a44878f44eab7901bbebec6be375ebe8b2ff0ce80864eb257c WatchSource:0}: Error finding container 3629a5a1bb2571a44878f44eab7901bbebec6be375ebe8b2ff0ce80864eb257c: Status 404 returned error can't find the container with id 3629a5a1bb2571a44878f44eab7901bbebec6be375ebe8b2ff0ce80864eb257c Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.776721 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a2335e94-5d68-4b3e-a13b-37296965798d-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a2335e94-5d68-4b3e-a13b-37296965798d" (UID: "a2335e94-5d68-4b3e-a13b-37296965798d"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.781785 4875 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/830b0136-66d9-47cb-9a99-f858deae4a28-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.781830 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-hnlxn\" (UniqueName: \"kubernetes.io/projected/a2335e94-5d68-4b3e-a13b-37296965798d-kube-api-access-hnlxn\") on node \"crc\" DevicePath \"\"" Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.781846 4875 reconciler_common.go:293] "Volume detached for volume \"marketplace-operator-metrics\" (UniqueName: \"kubernetes.io/secret/d725db0e-68dc-4655-ae0f-8e847ef651c3-marketplace-operator-metrics\") on node \"crc\" DevicePath \"\"" Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.781860 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-r9ld4\" (UniqueName: \"kubernetes.io/projected/b9a62657-d64a-4306-ba77-17417763f811-kube-api-access-r9ld4\") on node \"crc\" DevicePath \"\"" Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.781870 4875 reconciler_common.go:293] "Volume detached for volume \"marketplace-trusted-ca\" (UniqueName: \"kubernetes.io/configmap/d725db0e-68dc-4655-ae0f-8e847ef651c3-marketplace-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.781882 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mcrm2\" (UniqueName: \"kubernetes.io/projected/830b0136-66d9-47cb-9a99-f858deae4a28-kube-api-access-mcrm2\") on node \"crc\" DevicePath \"\"" Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.781902 4875 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/9e709507-2042-4993-a404-c5d8749b0da7-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.781915 4875 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b9a62657-d64a-4306-ba77-17417763f811-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.781926 4875 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a2335e94-5d68-4b3e-a13b-37296965798d-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 
00:12:31.781936 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tv45p\" (UniqueName: \"kubernetes.io/projected/d725db0e-68dc-4655-ae0f-8e847ef651c3-kube-api-access-tv45p\") on node \"crc\" DevicePath \"\"" Nov 26 00:12:31 crc kubenswrapper[4875]: I1126 00:12:31.781944 4875 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a2335e94-5d68-4b3e-a13b-37296965798d-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 00:12:32 crc kubenswrapper[4875]: I1126 00:12:32.250278 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-sgwj7" Nov 26 00:12:32 crc kubenswrapper[4875]: I1126 00:12:32.250274 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-sgwj7" event={"ID":"b9a62657-d64a-4306-ba77-17417763f811","Type":"ContainerDied","Data":"449b7582df2049da8e10cec74de4b5341aab0b276acb1b63b98006ff035541ea"} Nov 26 00:12:32 crc kubenswrapper[4875]: I1126 00:12:32.250796 4875 scope.go:117] "RemoveContainer" containerID="d59a2c580620323609b46be216adb653f119352da9aec2af3a895279a9690934" Nov 26 00:12:32 crc kubenswrapper[4875]: I1126 00:12:32.251941 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-bbdxg" event={"ID":"ac9cfb7a-c7d1-45ae-8ce1-7613fc318ec1","Type":"ContainerStarted","Data":"7674e5fccd4aceddc7997075f6ead866729f1b3c0f825a5e89cd507d655443e9"} Nov 26 00:12:32 crc kubenswrapper[4875]: I1126 00:12:32.251986 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-bbdxg" event={"ID":"ac9cfb7a-c7d1-45ae-8ce1-7613fc318ec1","Type":"ContainerStarted","Data":"3629a5a1bb2571a44878f44eab7901bbebec6be375ebe8b2ff0ce80864eb257c"} Nov 26 00:12:32 crc kubenswrapper[4875]: I1126 00:12:32.252367 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-bbdxg" Nov 26 00:12:32 crc kubenswrapper[4875]: I1126 00:12:32.254274 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-425kq" event={"ID":"9e709507-2042-4993-a404-c5d8749b0da7","Type":"ContainerDied","Data":"97833869429a3fe1c5899f93fe3690a2aa5ae7dfa6ec9acc7bf72df059673af2"} Nov 26 00:12:32 crc kubenswrapper[4875]: I1126 00:12:32.254355 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-425kq" Nov 26 00:12:32 crc kubenswrapper[4875]: I1126 00:12:32.256926 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-bbdxg" Nov 26 00:12:32 crc kubenswrapper[4875]: I1126 00:12:32.258394 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-jltf2" event={"ID":"a2335e94-5d68-4b3e-a13b-37296965798d","Type":"ContainerDied","Data":"c7a27988fe63e872c3c0fbe035ede00b638f68089210fe42c6d725618886b7c8"} Nov 26 00:12:32 crc kubenswrapper[4875]: I1126 00:12:32.258499 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-jltf2" Nov 26 00:12:32 crc kubenswrapper[4875]: I1126 00:12:32.260785 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/marketplace-operator-79b997595-6ml5v" Nov 26 00:12:32 crc kubenswrapper[4875]: I1126 00:12:32.261241 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-6ml5v" event={"ID":"d725db0e-68dc-4655-ae0f-8e847ef651c3","Type":"ContainerDied","Data":"f626bdeb5ea4d83aa18d9986371b58f7182fe60152c309ed0bf2d3d9f5dbaf65"} Nov 26 00:12:32 crc kubenswrapper[4875]: I1126 00:12:32.263791 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-b4kmf" event={"ID":"830b0136-66d9-47cb-9a99-f858deae4a28","Type":"ContainerDied","Data":"dfebe8857f0ec0eda2ad99bc5456120bba0a289dac8f1480e0f09d8ad7fdd5c1"} Nov 26 00:12:32 crc kubenswrapper[4875]: I1126 00:12:32.263910 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-b4kmf" Nov 26 00:12:32 crc kubenswrapper[4875]: I1126 00:12:32.267110 4875 scope.go:117] "RemoveContainer" containerID="dc456bf79b9eba53dad87604c7d1658afaf9e7cc2eb45073f6a2eedd4f75e001" Nov 26 00:12:32 crc kubenswrapper[4875]: I1126 00:12:32.275256 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/marketplace-operator-79b997595-bbdxg" podStartSLOduration=1.2749703270000001 podStartE2EDuration="1.274970327s" podCreationTimestamp="2025-11-26 00:12:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:12:32.273009144 +0000 UTC m=+325.462984839" watchObservedRunningTime="2025-11-26 00:12:32.274970327 +0000 UTC m=+325.464946022" Nov 26 00:12:32 crc kubenswrapper[4875]: I1126 00:12:32.291168 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-sgwj7"] Nov 26 00:12:32 crc kubenswrapper[4875]: I1126 00:12:32.294929 4875 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-sgwj7"] Nov 26 00:12:32 crc kubenswrapper[4875]: I1126 00:12:32.303388 4875 scope.go:117] "RemoveContainer" containerID="5a5fc9439ef61a2ad5eebf6abaa11ac6ae52c5626405d4d918eabd417ade2f38" Nov 26 00:12:32 crc kubenswrapper[4875]: I1126 00:12:32.328559 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-425kq"] Nov 26 00:12:32 crc kubenswrapper[4875]: I1126 00:12:32.329289 4875 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-425kq"] Nov 26 00:12:32 crc kubenswrapper[4875]: I1126 00:12:32.341375 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-6ml5v"] Nov 26 00:12:32 crc kubenswrapper[4875]: I1126 00:12:32.349557 4875 scope.go:117] "RemoveContainer" containerID="5e57024b97d3b0000c1a8e26536d619a6569a9495c9dfe5216fe3a4fd600d49e" Nov 26 00:12:32 crc kubenswrapper[4875]: I1126 00:12:32.349676 4875 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/marketplace-operator-79b997595-6ml5v"] Nov 26 00:12:32 crc kubenswrapper[4875]: I1126 00:12:32.354864 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-b4kmf"] Nov 26 00:12:32 crc kubenswrapper[4875]: I1126 00:12:32.359765 4875 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-b4kmf"] Nov 26 00:12:32 crc kubenswrapper[4875]: I1126 00:12:32.362584 4875 
kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-jltf2"] Nov 26 00:12:32 crc kubenswrapper[4875]: I1126 00:12:32.365023 4875 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-jltf2"] Nov 26 00:12:32 crc kubenswrapper[4875]: I1126 00:12:32.368178 4875 scope.go:117] "RemoveContainer" containerID="f900628c22e2bb685c41907d106c44fcf8cc2627f26b8ac8c3d300b6da063f6b" Nov 26 00:12:32 crc kubenswrapper[4875]: I1126 00:12:32.379883 4875 scope.go:117] "RemoveContainer" containerID="f5cba063a6fa84676f9100a044177cc1fcebcb24f8a8383a7c5e1ebde1b0cb47" Nov 26 00:12:32 crc kubenswrapper[4875]: I1126 00:12:32.399256 4875 scope.go:117] "RemoveContainer" containerID="7a1e8eb4d0a22760d011ec88c591c730f48cda76eb9e4c090d0c1559dead344a" Nov 26 00:12:32 crc kubenswrapper[4875]: I1126 00:12:32.418355 4875 scope.go:117] "RemoveContainer" containerID="f5ae930f5cb59c4f207238a32693ef2b71f4580141e002a4c9ead1c20dca013e" Nov 26 00:12:32 crc kubenswrapper[4875]: I1126 00:12:32.431571 4875 scope.go:117] "RemoveContainer" containerID="6e7814cb6abd3015a2987f861f8be5a17fb87399f03db55010bd76a79e289c79" Nov 26 00:12:32 crc kubenswrapper[4875]: I1126 00:12:32.444008 4875 scope.go:117] "RemoveContainer" containerID="3750e44f310d5540d69e1a766085b11f8211445703ac851ff330ac7ec2368c45" Nov 26 00:12:32 crc kubenswrapper[4875]: I1126 00:12:32.456187 4875 scope.go:117] "RemoveContainer" containerID="7e4e0cbe2f33ce20dd719319fc7186faae0b0cf00b67753c0a4fb0c3bfd56de5" Nov 26 00:12:32 crc kubenswrapper[4875]: I1126 00:12:32.470014 4875 scope.go:117] "RemoveContainer" containerID="c8aed07714d6c8aefdb0e6a5abf839b0f5e3de75dca4dbb0884152034c9c0e17" Nov 26 00:12:32 crc kubenswrapper[4875]: I1126 00:12:32.484212 4875 scope.go:117] "RemoveContainer" containerID="12bfbd7927ee7b660cec73359c10e24959ce8224a10f55574c21ed1431fabfad" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.275911 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-62ddv"] Nov 26 00:12:33 crc kubenswrapper[4875]: E1126 00:12:33.276180 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9a62657-d64a-4306-ba77-17417763f811" containerName="extract-content" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.276192 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9a62657-d64a-4306-ba77-17417763f811" containerName="extract-content" Nov 26 00:12:33 crc kubenswrapper[4875]: E1126 00:12:33.276221 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="830b0136-66d9-47cb-9a99-f858deae4a28" containerName="extract-content" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.276228 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="830b0136-66d9-47cb-9a99-f858deae4a28" containerName="extract-content" Nov 26 00:12:33 crc kubenswrapper[4875]: E1126 00:12:33.276237 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="830b0136-66d9-47cb-9a99-f858deae4a28" containerName="registry-server" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.276245 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="830b0136-66d9-47cb-9a99-f858deae4a28" containerName="registry-server" Nov 26 00:12:33 crc kubenswrapper[4875]: E1126 00:12:33.276251 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2335e94-5d68-4b3e-a13b-37296965798d" containerName="registry-server" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.276256 4875 state_mem.go:107] "Deleted CPUSet 
assignment" podUID="a2335e94-5d68-4b3e-a13b-37296965798d" containerName="registry-server" Nov 26 00:12:33 crc kubenswrapper[4875]: E1126 00:12:33.276266 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9a62657-d64a-4306-ba77-17417763f811" containerName="registry-server" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.276271 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9a62657-d64a-4306-ba77-17417763f811" containerName="registry-server" Nov 26 00:12:33 crc kubenswrapper[4875]: E1126 00:12:33.276281 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="830b0136-66d9-47cb-9a99-f858deae4a28" containerName="extract-utilities" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.276303 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="830b0136-66d9-47cb-9a99-f858deae4a28" containerName="extract-utilities" Nov 26 00:12:33 crc kubenswrapper[4875]: E1126 00:12:33.276313 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9a62657-d64a-4306-ba77-17417763f811" containerName="extract-utilities" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.276320 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9a62657-d64a-4306-ba77-17417763f811" containerName="extract-utilities" Nov 26 00:12:33 crc kubenswrapper[4875]: E1126 00:12:33.276329 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e709507-2042-4993-a404-c5d8749b0da7" containerName="registry-server" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.276335 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e709507-2042-4993-a404-c5d8749b0da7" containerName="registry-server" Nov 26 00:12:33 crc kubenswrapper[4875]: E1126 00:12:33.276341 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e709507-2042-4993-a404-c5d8749b0da7" containerName="extract-content" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.276346 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e709507-2042-4993-a404-c5d8749b0da7" containerName="extract-content" Nov 26 00:12:33 crc kubenswrapper[4875]: E1126 00:12:33.276356 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2335e94-5d68-4b3e-a13b-37296965798d" containerName="extract-content" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.276380 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2335e94-5d68-4b3e-a13b-37296965798d" containerName="extract-content" Nov 26 00:12:33 crc kubenswrapper[4875]: E1126 00:12:33.276394 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a2335e94-5d68-4b3e-a13b-37296965798d" containerName="extract-utilities" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.276456 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="a2335e94-5d68-4b3e-a13b-37296965798d" containerName="extract-utilities" Nov 26 00:12:33 crc kubenswrapper[4875]: E1126 00:12:33.276471 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="d725db0e-68dc-4655-ae0f-8e847ef651c3" containerName="marketplace-operator" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.276478 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="d725db0e-68dc-4655-ae0f-8e847ef651c3" containerName="marketplace-operator" Nov 26 00:12:33 crc kubenswrapper[4875]: E1126 00:12:33.276490 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9e709507-2042-4993-a404-c5d8749b0da7" containerName="extract-utilities" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 
00:12:33.276495 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="9e709507-2042-4993-a404-c5d8749b0da7" containerName="extract-utilities" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.276627 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="9e709507-2042-4993-a404-c5d8749b0da7" containerName="registry-server" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.276640 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="a2335e94-5d68-4b3e-a13b-37296965798d" containerName="registry-server" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.276650 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="d725db0e-68dc-4655-ae0f-8e847ef651c3" containerName="marketplace-operator" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.276658 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9a62657-d64a-4306-ba77-17417763f811" containerName="registry-server" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.276665 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="830b0136-66d9-47cb-9a99-f858deae4a28" containerName="registry-server" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.281287 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-62ddv" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.282031 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-62ddv"] Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.283763 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.401810 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4194a5df-cccf-4047-b5e4-d4f6610a2b8f-utilities\") pod \"certified-operators-62ddv\" (UID: \"4194a5df-cccf-4047-b5e4-d4f6610a2b8f\") " pod="openshift-marketplace/certified-operators-62ddv" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.401864 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-k2x8s\" (UniqueName: \"kubernetes.io/projected/4194a5df-cccf-4047-b5e4-d4f6610a2b8f-kube-api-access-k2x8s\") pod \"certified-operators-62ddv\" (UID: \"4194a5df-cccf-4047-b5e4-d4f6610a2b8f\") " pod="openshift-marketplace/certified-operators-62ddv" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.401992 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4194a5df-cccf-4047-b5e4-d4f6610a2b8f-catalog-content\") pod \"certified-operators-62ddv\" (UID: \"4194a5df-cccf-4047-b5e4-d4f6610a2b8f\") " pod="openshift-marketplace/certified-operators-62ddv" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.471663 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-marketplace-9p7k6"] Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.473256 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9p7k6" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.476658 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-marketplace-dockercfg-x2ctb" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.480971 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-9p7k6"] Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.503354 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vh9kg\" (UniqueName: \"kubernetes.io/projected/a9184a91-8dc4-47a4-a62f-23dd90cfd3fd-kube-api-access-vh9kg\") pod \"redhat-marketplace-9p7k6\" (UID: \"a9184a91-8dc4-47a4-a62f-23dd90cfd3fd\") " pod="openshift-marketplace/redhat-marketplace-9p7k6" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.503452 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4194a5df-cccf-4047-b5e4-d4f6610a2b8f-catalog-content\") pod \"certified-operators-62ddv\" (UID: \"4194a5df-cccf-4047-b5e4-d4f6610a2b8f\") " pod="openshift-marketplace/certified-operators-62ddv" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.503478 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a9184a91-8dc4-47a4-a62f-23dd90cfd3fd-utilities\") pod \"redhat-marketplace-9p7k6\" (UID: \"a9184a91-8dc4-47a4-a62f-23dd90cfd3fd\") " pod="openshift-marketplace/redhat-marketplace-9p7k6" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.503519 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4194a5df-cccf-4047-b5e4-d4f6610a2b8f-utilities\") pod \"certified-operators-62ddv\" (UID: \"4194a5df-cccf-4047-b5e4-d4f6610a2b8f\") " pod="openshift-marketplace/certified-operators-62ddv" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.503540 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-k2x8s\" (UniqueName: \"kubernetes.io/projected/4194a5df-cccf-4047-b5e4-d4f6610a2b8f-kube-api-access-k2x8s\") pod \"certified-operators-62ddv\" (UID: \"4194a5df-cccf-4047-b5e4-d4f6610a2b8f\") " pod="openshift-marketplace/certified-operators-62ddv" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.503592 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a9184a91-8dc4-47a4-a62f-23dd90cfd3fd-catalog-content\") pod \"redhat-marketplace-9p7k6\" (UID: \"a9184a91-8dc4-47a4-a62f-23dd90cfd3fd\") " pod="openshift-marketplace/redhat-marketplace-9p7k6" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.503916 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/4194a5df-cccf-4047-b5e4-d4f6610a2b8f-catalog-content\") pod \"certified-operators-62ddv\" (UID: \"4194a5df-cccf-4047-b5e4-d4f6610a2b8f\") " pod="openshift-marketplace/certified-operators-62ddv" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.504007 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/4194a5df-cccf-4047-b5e4-d4f6610a2b8f-utilities\") pod \"certified-operators-62ddv\" (UID: 
\"4194a5df-cccf-4047-b5e4-d4f6610a2b8f\") " pod="openshift-marketplace/certified-operators-62ddv" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.522648 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-k2x8s\" (UniqueName: \"kubernetes.io/projected/4194a5df-cccf-4047-b5e4-d4f6610a2b8f-kube-api-access-k2x8s\") pod \"certified-operators-62ddv\" (UID: \"4194a5df-cccf-4047-b5e4-d4f6610a2b8f\") " pod="openshift-marketplace/certified-operators-62ddv" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.535982 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="830b0136-66d9-47cb-9a99-f858deae4a28" path="/var/lib/kubelet/pods/830b0136-66d9-47cb-9a99-f858deae4a28/volumes" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.536588 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="9e709507-2042-4993-a404-c5d8749b0da7" path="/var/lib/kubelet/pods/9e709507-2042-4993-a404-c5d8749b0da7/volumes" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.537166 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a2335e94-5d68-4b3e-a13b-37296965798d" path="/var/lib/kubelet/pods/a2335e94-5d68-4b3e-a13b-37296965798d/volumes" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.538334 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b9a62657-d64a-4306-ba77-17417763f811" path="/var/lib/kubelet/pods/b9a62657-d64a-4306-ba77-17417763f811/volumes" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.538943 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="d725db0e-68dc-4655-ae0f-8e847ef651c3" path="/var/lib/kubelet/pods/d725db0e-68dc-4655-ae0f-8e847ef651c3/volumes" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.604863 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vh9kg\" (UniqueName: \"kubernetes.io/projected/a9184a91-8dc4-47a4-a62f-23dd90cfd3fd-kube-api-access-vh9kg\") pod \"redhat-marketplace-9p7k6\" (UID: \"a9184a91-8dc4-47a4-a62f-23dd90cfd3fd\") " pod="openshift-marketplace/redhat-marketplace-9p7k6" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.604934 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a9184a91-8dc4-47a4-a62f-23dd90cfd3fd-utilities\") pod \"redhat-marketplace-9p7k6\" (UID: \"a9184a91-8dc4-47a4-a62f-23dd90cfd3fd\") " pod="openshift-marketplace/redhat-marketplace-9p7k6" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.604986 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a9184a91-8dc4-47a4-a62f-23dd90cfd3fd-catalog-content\") pod \"redhat-marketplace-9p7k6\" (UID: \"a9184a91-8dc4-47a4-a62f-23dd90cfd3fd\") " pod="openshift-marketplace/redhat-marketplace-9p7k6" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.605152 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-62ddv" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.605489 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a9184a91-8dc4-47a4-a62f-23dd90cfd3fd-catalog-content\") pod \"redhat-marketplace-9p7k6\" (UID: \"a9184a91-8dc4-47a4-a62f-23dd90cfd3fd\") " pod="openshift-marketplace/redhat-marketplace-9p7k6" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.605584 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a9184a91-8dc4-47a4-a62f-23dd90cfd3fd-utilities\") pod \"redhat-marketplace-9p7k6\" (UID: \"a9184a91-8dc4-47a4-a62f-23dd90cfd3fd\") " pod="openshift-marketplace/redhat-marketplace-9p7k6" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.620197 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vh9kg\" (UniqueName: \"kubernetes.io/projected/a9184a91-8dc4-47a4-a62f-23dd90cfd3fd-kube-api-access-vh9kg\") pod \"redhat-marketplace-9p7k6\" (UID: \"a9184a91-8dc4-47a4-a62f-23dd90cfd3fd\") " pod="openshift-marketplace/redhat-marketplace-9p7k6" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.794210 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9p7k6" Nov 26 00:12:33 crc kubenswrapper[4875]: I1126 00:12:33.987501 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-62ddv"] Nov 26 00:12:34 crc kubenswrapper[4875]: I1126 00:12:34.062375 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-marketplace-9p7k6"] Nov 26 00:12:34 crc kubenswrapper[4875]: W1126 00:12:34.068817 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda9184a91_8dc4_47a4_a62f_23dd90cfd3fd.slice/crio-2ff268aca290d6fb614721865166e509518102349782d0ae468182d324e536a0 WatchSource:0}: Error finding container 2ff268aca290d6fb614721865166e509518102349782d0ae468182d324e536a0: Status 404 returned error can't find the container with id 2ff268aca290d6fb614721865166e509518102349782d0ae468182d324e536a0 Nov 26 00:12:34 crc kubenswrapper[4875]: I1126 00:12:34.290841 4875 generic.go:334] "Generic (PLEG): container finished" podID="a9184a91-8dc4-47a4-a62f-23dd90cfd3fd" containerID="7f6a065c83fb0961c5c9975f25d18c403b15e58d5c8c1e3783c4dca5cbe09ef8" exitCode=0 Nov 26 00:12:34 crc kubenswrapper[4875]: I1126 00:12:34.290937 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9p7k6" event={"ID":"a9184a91-8dc4-47a4-a62f-23dd90cfd3fd","Type":"ContainerDied","Data":"7f6a065c83fb0961c5c9975f25d18c403b15e58d5c8c1e3783c4dca5cbe09ef8"} Nov 26 00:12:34 crc kubenswrapper[4875]: I1126 00:12:34.291197 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9p7k6" event={"ID":"a9184a91-8dc4-47a4-a62f-23dd90cfd3fd","Type":"ContainerStarted","Data":"2ff268aca290d6fb614721865166e509518102349782d0ae468182d324e536a0"} Nov 26 00:12:34 crc kubenswrapper[4875]: I1126 00:12:34.293381 4875 generic.go:334] "Generic (PLEG): container finished" podID="4194a5df-cccf-4047-b5e4-d4f6610a2b8f" containerID="a84d1143a9c56b74a23ad0663daf82770566583395f54d4d3f647d51e9020434" exitCode=0 Nov 26 00:12:34 crc kubenswrapper[4875]: I1126 00:12:34.294113 4875 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-62ddv" event={"ID":"4194a5df-cccf-4047-b5e4-d4f6610a2b8f","Type":"ContainerDied","Data":"a84d1143a9c56b74a23ad0663daf82770566583395f54d4d3f647d51e9020434"} Nov 26 00:12:34 crc kubenswrapper[4875]: I1126 00:12:34.294164 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-62ddv" event={"ID":"4194a5df-cccf-4047-b5e4-d4f6610a2b8f","Type":"ContainerStarted","Data":"6bafe6a1ff612aeac23fe525292e1afbe1bdbd5364f7efec81400d7c0bc72272"} Nov 26 00:12:35 crc kubenswrapper[4875]: I1126 00:12:35.301135 4875 generic.go:334] "Generic (PLEG): container finished" podID="a9184a91-8dc4-47a4-a62f-23dd90cfd3fd" containerID="f4e154008580e6279dace524a49f8b41977fe85e7baa872c33a6093fbf16c848" exitCode=0 Nov 26 00:12:35 crc kubenswrapper[4875]: I1126 00:12:35.301317 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9p7k6" event={"ID":"a9184a91-8dc4-47a4-a62f-23dd90cfd3fd","Type":"ContainerDied","Data":"f4e154008580e6279dace524a49f8b41977fe85e7baa872c33a6093fbf16c848"} Nov 26 00:12:35 crc kubenswrapper[4875]: I1126 00:12:35.304286 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-62ddv" event={"ID":"4194a5df-cccf-4047-b5e4-d4f6610a2b8f","Type":"ContainerStarted","Data":"98bb2787dc7db51d75ba08181f1d301ed73b768c27677ebe6b196355a5014038"} Nov 26 00:12:35 crc kubenswrapper[4875]: I1126 00:12:35.674904 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-5bvtz"] Nov 26 00:12:35 crc kubenswrapper[4875]: I1126 00:12:35.676821 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5bvtz" Nov 26 00:12:35 crc kubenswrapper[4875]: I1126 00:12:35.679353 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 26 00:12:35 crc kubenswrapper[4875]: I1126 00:12:35.688615 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5bvtz"] Nov 26 00:12:35 crc kubenswrapper[4875]: I1126 00:12:35.732861 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0ef35133-3e1a-4f40-9aff-21849f7683d4-utilities\") pod \"community-operators-5bvtz\" (UID: \"0ef35133-3e1a-4f40-9aff-21849f7683d4\") " pod="openshift-marketplace/community-operators-5bvtz" Nov 26 00:12:35 crc kubenswrapper[4875]: I1126 00:12:35.733165 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-4f5g8\" (UniqueName: \"kubernetes.io/projected/0ef35133-3e1a-4f40-9aff-21849f7683d4-kube-api-access-4f5g8\") pod \"community-operators-5bvtz\" (UID: \"0ef35133-3e1a-4f40-9aff-21849f7683d4\") " pod="openshift-marketplace/community-operators-5bvtz" Nov 26 00:12:35 crc kubenswrapper[4875]: I1126 00:12:35.733289 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0ef35133-3e1a-4f40-9aff-21849f7683d4-catalog-content\") pod \"community-operators-5bvtz\" (UID: \"0ef35133-3e1a-4f40-9aff-21849f7683d4\") " pod="openshift-marketplace/community-operators-5bvtz" Nov 26 00:12:35 crc kubenswrapper[4875]: I1126 00:12:35.836319 4875 reconciler_common.go:218] 
"operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0ef35133-3e1a-4f40-9aff-21849f7683d4-catalog-content\") pod \"community-operators-5bvtz\" (UID: \"0ef35133-3e1a-4f40-9aff-21849f7683d4\") " pod="openshift-marketplace/community-operators-5bvtz" Nov 26 00:12:35 crc kubenswrapper[4875]: I1126 00:12:35.836377 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0ef35133-3e1a-4f40-9aff-21849f7683d4-utilities\") pod \"community-operators-5bvtz\" (UID: \"0ef35133-3e1a-4f40-9aff-21849f7683d4\") " pod="openshift-marketplace/community-operators-5bvtz" Nov 26 00:12:35 crc kubenswrapper[4875]: I1126 00:12:35.836532 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-4f5g8\" (UniqueName: \"kubernetes.io/projected/0ef35133-3e1a-4f40-9aff-21849f7683d4-kube-api-access-4f5g8\") pod \"community-operators-5bvtz\" (UID: \"0ef35133-3e1a-4f40-9aff-21849f7683d4\") " pod="openshift-marketplace/community-operators-5bvtz" Nov 26 00:12:35 crc kubenswrapper[4875]: I1126 00:12:35.837677 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0ef35133-3e1a-4f40-9aff-21849f7683d4-catalog-content\") pod \"community-operators-5bvtz\" (UID: \"0ef35133-3e1a-4f40-9aff-21849f7683d4\") " pod="openshift-marketplace/community-operators-5bvtz" Nov 26 00:12:35 crc kubenswrapper[4875]: I1126 00:12:35.837891 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0ef35133-3e1a-4f40-9aff-21849f7683d4-utilities\") pod \"community-operators-5bvtz\" (UID: \"0ef35133-3e1a-4f40-9aff-21849f7683d4\") " pod="openshift-marketplace/community-operators-5bvtz" Nov 26 00:12:35 crc kubenswrapper[4875]: I1126 00:12:35.872666 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-4f5g8\" (UniqueName: \"kubernetes.io/projected/0ef35133-3e1a-4f40-9aff-21849f7683d4-kube-api-access-4f5g8\") pod \"community-operators-5bvtz\" (UID: \"0ef35133-3e1a-4f40-9aff-21849f7683d4\") " pod="openshift-marketplace/community-operators-5bvtz" Nov 26 00:12:35 crc kubenswrapper[4875]: I1126 00:12:35.874984 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-5lh44"] Nov 26 00:12:35 crc kubenswrapper[4875]: I1126 00:12:35.876014 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-5lh44" Nov 26 00:12:35 crc kubenswrapper[4875]: I1126 00:12:35.877895 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 26 00:12:35 crc kubenswrapper[4875]: I1126 00:12:35.885679 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5lh44"] Nov 26 00:12:35 crc kubenswrapper[4875]: I1126 00:12:35.937175 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3aeb8a55-85db-47ac-8b24-8efcf00a2a96-catalog-content\") pod \"redhat-operators-5lh44\" (UID: \"3aeb8a55-85db-47ac-8b24-8efcf00a2a96\") " pod="openshift-marketplace/redhat-operators-5lh44" Nov 26 00:12:35 crc kubenswrapper[4875]: I1126 00:12:35.937231 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ftppt\" (UniqueName: \"kubernetes.io/projected/3aeb8a55-85db-47ac-8b24-8efcf00a2a96-kube-api-access-ftppt\") pod \"redhat-operators-5lh44\" (UID: \"3aeb8a55-85db-47ac-8b24-8efcf00a2a96\") " pod="openshift-marketplace/redhat-operators-5lh44" Nov 26 00:12:35 crc kubenswrapper[4875]: I1126 00:12:35.937290 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3aeb8a55-85db-47ac-8b24-8efcf00a2a96-utilities\") pod \"redhat-operators-5lh44\" (UID: \"3aeb8a55-85db-47ac-8b24-8efcf00a2a96\") " pod="openshift-marketplace/redhat-operators-5lh44" Nov 26 00:12:36 crc kubenswrapper[4875]: I1126 00:12:36.039073 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3aeb8a55-85db-47ac-8b24-8efcf00a2a96-catalog-content\") pod \"redhat-operators-5lh44\" (UID: \"3aeb8a55-85db-47ac-8b24-8efcf00a2a96\") " pod="openshift-marketplace/redhat-operators-5lh44" Nov 26 00:12:36 crc kubenswrapper[4875]: I1126 00:12:36.039415 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ftppt\" (UniqueName: \"kubernetes.io/projected/3aeb8a55-85db-47ac-8b24-8efcf00a2a96-kube-api-access-ftppt\") pod \"redhat-operators-5lh44\" (UID: \"3aeb8a55-85db-47ac-8b24-8efcf00a2a96\") " pod="openshift-marketplace/redhat-operators-5lh44" Nov 26 00:12:36 crc kubenswrapper[4875]: I1126 00:12:36.039471 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3aeb8a55-85db-47ac-8b24-8efcf00a2a96-utilities\") pod \"redhat-operators-5lh44\" (UID: \"3aeb8a55-85db-47ac-8b24-8efcf00a2a96\") " pod="openshift-marketplace/redhat-operators-5lh44" Nov 26 00:12:36 crc kubenswrapper[4875]: I1126 00:12:36.040315 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3aeb8a55-85db-47ac-8b24-8efcf00a2a96-utilities\") pod \"redhat-operators-5lh44\" (UID: \"3aeb8a55-85db-47ac-8b24-8efcf00a2a96\") " pod="openshift-marketplace/redhat-operators-5lh44" Nov 26 00:12:36 crc kubenswrapper[4875]: I1126 00:12:36.040452 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3aeb8a55-85db-47ac-8b24-8efcf00a2a96-catalog-content\") pod \"redhat-operators-5lh44\" (UID: \"3aeb8a55-85db-47ac-8b24-8efcf00a2a96\") " 
pod="openshift-marketplace/redhat-operators-5lh44" Nov 26 00:12:36 crc kubenswrapper[4875]: I1126 00:12:36.043932 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-5bvtz" Nov 26 00:12:36 crc kubenswrapper[4875]: I1126 00:12:36.059025 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ftppt\" (UniqueName: \"kubernetes.io/projected/3aeb8a55-85db-47ac-8b24-8efcf00a2a96-kube-api-access-ftppt\") pod \"redhat-operators-5lh44\" (UID: \"3aeb8a55-85db-47ac-8b24-8efcf00a2a96\") " pod="openshift-marketplace/redhat-operators-5lh44" Nov 26 00:12:36 crc kubenswrapper[4875]: I1126 00:12:36.210545 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-5lh44" Nov 26 00:12:36 crc kubenswrapper[4875]: I1126 00:12:36.254666 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-5bvtz"] Nov 26 00:12:36 crc kubenswrapper[4875]: I1126 00:12:36.316390 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9p7k6" event={"ID":"a9184a91-8dc4-47a4-a62f-23dd90cfd3fd","Type":"ContainerStarted","Data":"16e7b57d222f8b3cbe8a3d561c96e1f9a98a55c67cc905fba59da0721ee20516"} Nov 26 00:12:36 crc kubenswrapper[4875]: I1126 00:12:36.319346 4875 generic.go:334] "Generic (PLEG): container finished" podID="4194a5df-cccf-4047-b5e4-d4f6610a2b8f" containerID="98bb2787dc7db51d75ba08181f1d301ed73b768c27677ebe6b196355a5014038" exitCode=0 Nov 26 00:12:36 crc kubenswrapper[4875]: I1126 00:12:36.319714 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-62ddv" event={"ID":"4194a5df-cccf-4047-b5e4-d4f6610a2b8f","Type":"ContainerDied","Data":"98bb2787dc7db51d75ba08181f1d301ed73b768c27677ebe6b196355a5014038"} Nov 26 00:12:36 crc kubenswrapper[4875]: I1126 00:12:36.323246 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5bvtz" event={"ID":"0ef35133-3e1a-4f40-9aff-21849f7683d4","Type":"ContainerStarted","Data":"2bb123c21f69b51dcbf7ecbf5a288b73a7e2ee73843f61e99fd24bdf242087f0"} Nov 26 00:12:36 crc kubenswrapper[4875]: I1126 00:12:36.336559 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-marketplace-9p7k6" podStartSLOduration=1.84173592 podStartE2EDuration="3.336541627s" podCreationTimestamp="2025-11-26 00:12:33 +0000 UTC" firstStartedPulling="2025-11-26 00:12:34.29288871 +0000 UTC m=+327.482864405" lastFinishedPulling="2025-11-26 00:12:35.787694417 +0000 UTC m=+328.977670112" observedRunningTime="2025-11-26 00:12:36.335851508 +0000 UTC m=+329.525827203" watchObservedRunningTime="2025-11-26 00:12:36.336541627 +0000 UTC m=+329.526517322" Nov 26 00:12:36 crc kubenswrapper[4875]: I1126 00:12:36.427931 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-5lh44"] Nov 26 00:12:37 crc kubenswrapper[4875]: I1126 00:12:37.331884 4875 generic.go:334] "Generic (PLEG): container finished" podID="0ef35133-3e1a-4f40-9aff-21849f7683d4" containerID="a34d6ecba7956c1f5edae6438d3c2b8bdf654da94e814f82626269e75de75cc2" exitCode=0 Nov 26 00:12:37 crc kubenswrapper[4875]: I1126 00:12:37.331957 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5bvtz" 
event={"ID":"0ef35133-3e1a-4f40-9aff-21849f7683d4","Type":"ContainerDied","Data":"a34d6ecba7956c1f5edae6438d3c2b8bdf654da94e814f82626269e75de75cc2"} Nov 26 00:12:37 crc kubenswrapper[4875]: I1126 00:12:37.334209 4875 generic.go:334] "Generic (PLEG): container finished" podID="3aeb8a55-85db-47ac-8b24-8efcf00a2a96" containerID="b2f3f83fd2f978113834409e709a69b7a887126fdac32c6e3653f58df9b96e35" exitCode=0 Nov 26 00:12:37 crc kubenswrapper[4875]: I1126 00:12:37.334276 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5lh44" event={"ID":"3aeb8a55-85db-47ac-8b24-8efcf00a2a96","Type":"ContainerDied","Data":"b2f3f83fd2f978113834409e709a69b7a887126fdac32c6e3653f58df9b96e35"} Nov 26 00:12:37 crc kubenswrapper[4875]: I1126 00:12:37.334308 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5lh44" event={"ID":"3aeb8a55-85db-47ac-8b24-8efcf00a2a96","Type":"ContainerStarted","Data":"0073391015f9e5e2fa30c9a3efeaa1fe45ebc167d9106b26eccd773eb79c5521"} Nov 26 00:12:37 crc kubenswrapper[4875]: I1126 00:12:37.336081 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-62ddv" event={"ID":"4194a5df-cccf-4047-b5e4-d4f6610a2b8f","Type":"ContainerStarted","Data":"75f1a1e4a0f30770a758b62562235a071f10dd986b1f22ff627d8552f9a39ae2"} Nov 26 00:12:37 crc kubenswrapper[4875]: I1126 00:12:37.376353 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-62ddv" podStartSLOduration=1.955605791 podStartE2EDuration="4.376337778s" podCreationTimestamp="2025-11-26 00:12:33 +0000 UTC" firstStartedPulling="2025-11-26 00:12:34.295635575 +0000 UTC m=+327.485611270" lastFinishedPulling="2025-11-26 00:12:36.716367562 +0000 UTC m=+329.906343257" observedRunningTime="2025-11-26 00:12:37.374336034 +0000 UTC m=+330.564311729" watchObservedRunningTime="2025-11-26 00:12:37.376337778 +0000 UTC m=+330.566313473" Nov 26 00:12:38 crc kubenswrapper[4875]: I1126 00:12:38.342982 4875 generic.go:334] "Generic (PLEG): container finished" podID="0ef35133-3e1a-4f40-9aff-21849f7683d4" containerID="e571fe5de1d4646dab189bda4d394ed3b24b4c9ba5ab429b5aa8d22bf44b7b77" exitCode=0 Nov 26 00:12:38 crc kubenswrapper[4875]: I1126 00:12:38.343043 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5bvtz" event={"ID":"0ef35133-3e1a-4f40-9aff-21849f7683d4","Type":"ContainerDied","Data":"e571fe5de1d4646dab189bda4d394ed3b24b4c9ba5ab429b5aa8d22bf44b7b77"} Nov 26 00:12:38 crc kubenswrapper[4875]: I1126 00:12:38.347252 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5lh44" event={"ID":"3aeb8a55-85db-47ac-8b24-8efcf00a2a96","Type":"ContainerStarted","Data":"b724a15c362bed82fcd405aa2e1f2cff81848eb7f26a5425dc9e9440d406c6f3"} Nov 26 00:12:39 crc kubenswrapper[4875]: I1126 00:12:39.353071 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-5bvtz" event={"ID":"0ef35133-3e1a-4f40-9aff-21849f7683d4","Type":"ContainerStarted","Data":"a8d23b26196fc898dceb25c109e6eda2e3f733100221d6a7a5631be5eefea3d8"} Nov 26 00:12:39 crc kubenswrapper[4875]: I1126 00:12:39.354689 4875 generic.go:334] "Generic (PLEG): container finished" podID="3aeb8a55-85db-47ac-8b24-8efcf00a2a96" containerID="b724a15c362bed82fcd405aa2e1f2cff81848eb7f26a5425dc9e9440d406c6f3" exitCode=0 Nov 26 00:12:39 crc kubenswrapper[4875]: I1126 00:12:39.354726 4875 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5lh44" event={"ID":"3aeb8a55-85db-47ac-8b24-8efcf00a2a96","Type":"ContainerDied","Data":"b724a15c362bed82fcd405aa2e1f2cff81848eb7f26a5425dc9e9440d406c6f3"} Nov 26 00:12:39 crc kubenswrapper[4875]: I1126 00:12:39.378670 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-5bvtz" podStartSLOduration=2.95844686 podStartE2EDuration="4.378648502s" podCreationTimestamp="2025-11-26 00:12:35 +0000 UTC" firstStartedPulling="2025-11-26 00:12:37.333510734 +0000 UTC m=+330.523486419" lastFinishedPulling="2025-11-26 00:12:38.753712366 +0000 UTC m=+331.943688061" observedRunningTime="2025-11-26 00:12:39.376854333 +0000 UTC m=+332.566830028" watchObservedRunningTime="2025-11-26 00:12:39.378648502 +0000 UTC m=+332.568624197" Nov 26 00:12:41 crc kubenswrapper[4875]: I1126 00:12:41.367678 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-5lh44" event={"ID":"3aeb8a55-85db-47ac-8b24-8efcf00a2a96","Type":"ContainerStarted","Data":"d15c9a32d8c2a61c70753be19c30f5d05ea378a74fb2e77b478729ce1a33280c"} Nov 26 00:12:41 crc kubenswrapper[4875]: I1126 00:12:41.384736 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-5lh44" podStartSLOduration=3.945154676 podStartE2EDuration="6.384720479s" podCreationTimestamp="2025-11-26 00:12:35 +0000 UTC" firstStartedPulling="2025-11-26 00:12:37.335648343 +0000 UTC m=+330.525624038" lastFinishedPulling="2025-11-26 00:12:39.775214156 +0000 UTC m=+332.965189841" observedRunningTime="2025-11-26 00:12:41.383251949 +0000 UTC m=+334.573227644" watchObservedRunningTime="2025-11-26 00:12:41.384720479 +0000 UTC m=+334.574696174" Nov 26 00:12:43 crc kubenswrapper[4875]: I1126 00:12:43.606120 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-62ddv" Nov 26 00:12:43 crc kubenswrapper[4875]: I1126 00:12:43.606837 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-62ddv" Nov 26 00:12:43 crc kubenswrapper[4875]: I1126 00:12:43.666188 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-62ddv" Nov 26 00:12:43 crc kubenswrapper[4875]: I1126 00:12:43.795979 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-marketplace-9p7k6" Nov 26 00:12:43 crc kubenswrapper[4875]: I1126 00:12:43.796388 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-marketplace-9p7k6" Nov 26 00:12:43 crc kubenswrapper[4875]: I1126 00:12:43.837567 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-marketplace-9p7k6" Nov 26 00:12:44 crc kubenswrapper[4875]: I1126 00:12:44.421132 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-62ddv" Nov 26 00:12:44 crc kubenswrapper[4875]: I1126 00:12:44.425055 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-marketplace-9p7k6" Nov 26 00:12:46 crc kubenswrapper[4875]: I1126 00:12:46.044739 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-5bvtz" Nov 26 00:12:46 crc 
kubenswrapper[4875]: I1126 00:12:46.044818 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-5bvtz" Nov 26 00:12:46 crc kubenswrapper[4875]: I1126 00:12:46.089536 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-5bvtz" Nov 26 00:12:46 crc kubenswrapper[4875]: I1126 00:12:46.211775 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-5lh44" Nov 26 00:12:46 crc kubenswrapper[4875]: I1126 00:12:46.211880 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-5lh44" Nov 26 00:12:46 crc kubenswrapper[4875]: I1126 00:12:46.252661 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-5lh44" Nov 26 00:12:46 crc kubenswrapper[4875]: I1126 00:12:46.433856 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-5bvtz" Nov 26 00:12:46 crc kubenswrapper[4875]: I1126 00:12:46.434744 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-5lh44" Nov 26 00:13:07 crc kubenswrapper[4875]: I1126 00:13:07.512209 4875 patch_prober.go:28] interesting pod/machine-config-daemon-9mztb container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 00:13:07 crc kubenswrapper[4875]: I1126 00:13:07.512639 4875 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 00:13:37 crc kubenswrapper[4875]: I1126 00:13:37.511821 4875 patch_prober.go:28] interesting pod/machine-config-daemon-9mztb container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 00:13:37 crc kubenswrapper[4875]: I1126 00:13:37.512651 4875 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 00:14:07 crc kubenswrapper[4875]: I1126 00:14:07.512098 4875 patch_prober.go:28] interesting pod/machine-config-daemon-9mztb container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 00:14:07 crc kubenswrapper[4875]: I1126 00:14:07.512675 4875 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 
00:14:07 crc kubenswrapper[4875]: I1126 00:14:07.512722 4875 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" Nov 26 00:14:07 crc kubenswrapper[4875]: I1126 00:14:07.513335 4875 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"1e834a56eabb3c55e61cc7d226e09bfcf80d0068e3d0752e0aeac7fbc0a28a44"} pod="openshift-machine-config-operator/machine-config-daemon-9mztb" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 00:14:07 crc kubenswrapper[4875]: I1126 00:14:07.513393 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" containerName="machine-config-daemon" containerID="cri-o://1e834a56eabb3c55e61cc7d226e09bfcf80d0068e3d0752e0aeac7fbc0a28a44" gracePeriod=600 Nov 26 00:14:07 crc kubenswrapper[4875]: I1126 00:14:07.891214 4875 generic.go:334] "Generic (PLEG): container finished" podID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" containerID="1e834a56eabb3c55e61cc7d226e09bfcf80d0068e3d0752e0aeac7fbc0a28a44" exitCode=0 Nov 26 00:14:07 crc kubenswrapper[4875]: I1126 00:14:07.891314 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" event={"ID":"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e","Type":"ContainerDied","Data":"1e834a56eabb3c55e61cc7d226e09bfcf80d0068e3d0752e0aeac7fbc0a28a44"} Nov 26 00:14:07 crc kubenswrapper[4875]: I1126 00:14:07.891699 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" event={"ID":"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e","Type":"ContainerStarted","Data":"c8a7242997bc14a8e06319ccddca75cbab2251128f6918fc296f21726f9be284"} Nov 26 00:14:07 crc kubenswrapper[4875]: I1126 00:14:07.891728 4875 scope.go:117] "RemoveContainer" containerID="2cdf274332d9e071cabf1993c37531ebc66300c965dd736a4795252168efc371" Nov 26 00:14:11 crc kubenswrapper[4875]: E1126 00:14:11.736327 4875 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d4fb5a7_3ade_467a_9640_d744b1ef0c8e.slice/crio-1e834a56eabb3c55e61cc7d226e09bfcf80d0068e3d0752e0aeac7fbc0a28a44.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d4fb5a7_3ade_467a_9640_d744b1ef0c8e.slice/crio-conmon-1e834a56eabb3c55e61cc7d226e09bfcf80d0068e3d0752e0aeac7fbc0a28a44.scope\": RecentStats: unable to find data in memory cache]" Nov 26 00:14:21 crc kubenswrapper[4875]: E1126 00:14:21.845537 4875 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d4fb5a7_3ade_467a_9640_d744b1ef0c8e.slice/crio-1e834a56eabb3c55e61cc7d226e09bfcf80d0068e3d0752e0aeac7fbc0a28a44.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d4fb5a7_3ade_467a_9640_d744b1ef0c8e.slice/crio-conmon-1e834a56eabb3c55e61cc7d226e09bfcf80d0068e3d0752e0aeac7fbc0a28a44.scope\": RecentStats: unable to find data in memory cache]" Nov 26 00:14:27 crc kubenswrapper[4875]: I1126 00:14:27.237923 4875 kubelet.go:2421] "SyncLoop ADD" 
source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-st7dd"] Nov 26 00:14:27 crc kubenswrapper[4875]: I1126 00:14:27.239000 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-st7dd" Nov 26 00:14:27 crc kubenswrapper[4875]: I1126 00:14:27.250289 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-st7dd"] Nov 26 00:14:27 crc kubenswrapper[4875]: I1126 00:14:27.373879 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/ea44bdfa-858a-4de7-ba77-44623dd98ae5-registry-certificates\") pod \"image-registry-66df7c8f76-st7dd\" (UID: \"ea44bdfa-858a-4de7-ba77-44623dd98ae5\") " pod="openshift-image-registry/image-registry-66df7c8f76-st7dd" Nov 26 00:14:27 crc kubenswrapper[4875]: I1126 00:14:27.374182 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/ea44bdfa-858a-4de7-ba77-44623dd98ae5-bound-sa-token\") pod \"image-registry-66df7c8f76-st7dd\" (UID: \"ea44bdfa-858a-4de7-ba77-44623dd98ae5\") " pod="openshift-image-registry/image-registry-66df7c8f76-st7dd" Nov 26 00:14:27 crc kubenswrapper[4875]: I1126 00:14:27.374298 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/ea44bdfa-858a-4de7-ba77-44623dd98ae5-ca-trust-extracted\") pod \"image-registry-66df7c8f76-st7dd\" (UID: \"ea44bdfa-858a-4de7-ba77-44623dd98ae5\") " pod="openshift-image-registry/image-registry-66df7c8f76-st7dd" Nov 26 00:14:27 crc kubenswrapper[4875]: I1126 00:14:27.374443 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ggqkf\" (UniqueName: \"kubernetes.io/projected/ea44bdfa-858a-4de7-ba77-44623dd98ae5-kube-api-access-ggqkf\") pod \"image-registry-66df7c8f76-st7dd\" (UID: \"ea44bdfa-858a-4de7-ba77-44623dd98ae5\") " pod="openshift-image-registry/image-registry-66df7c8f76-st7dd" Nov 26 00:14:27 crc kubenswrapper[4875]: I1126 00:14:27.374576 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ea44bdfa-858a-4de7-ba77-44623dd98ae5-trusted-ca\") pod \"image-registry-66df7c8f76-st7dd\" (UID: \"ea44bdfa-858a-4de7-ba77-44623dd98ae5\") " pod="openshift-image-registry/image-registry-66df7c8f76-st7dd" Nov 26 00:14:27 crc kubenswrapper[4875]: I1126 00:14:27.374688 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/ea44bdfa-858a-4de7-ba77-44623dd98ae5-registry-tls\") pod \"image-registry-66df7c8f76-st7dd\" (UID: \"ea44bdfa-858a-4de7-ba77-44623dd98ae5\") " pod="openshift-image-registry/image-registry-66df7c8f76-st7dd" Nov 26 00:14:27 crc kubenswrapper[4875]: I1126 00:14:27.374823 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-st7dd\" (UID: \"ea44bdfa-858a-4de7-ba77-44623dd98ae5\") " pod="openshift-image-registry/image-registry-66df7c8f76-st7dd" Nov 26 00:14:27 crc kubenswrapper[4875]: 
I1126 00:14:27.374943 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/ea44bdfa-858a-4de7-ba77-44623dd98ae5-installation-pull-secrets\") pod \"image-registry-66df7c8f76-st7dd\" (UID: \"ea44bdfa-858a-4de7-ba77-44623dd98ae5\") " pod="openshift-image-registry/image-registry-66df7c8f76-st7dd" Nov 26 00:14:27 crc kubenswrapper[4875]: I1126 00:14:27.400077 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"image-registry-66df7c8f76-st7dd\" (UID: \"ea44bdfa-858a-4de7-ba77-44623dd98ae5\") " pod="openshift-image-registry/image-registry-66df7c8f76-st7dd" Nov 26 00:14:27 crc kubenswrapper[4875]: I1126 00:14:27.476106 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/ea44bdfa-858a-4de7-ba77-44623dd98ae5-bound-sa-token\") pod \"image-registry-66df7c8f76-st7dd\" (UID: \"ea44bdfa-858a-4de7-ba77-44623dd98ae5\") " pod="openshift-image-registry/image-registry-66df7c8f76-st7dd" Nov 26 00:14:27 crc kubenswrapper[4875]: I1126 00:14:27.476177 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/ea44bdfa-858a-4de7-ba77-44623dd98ae5-ca-trust-extracted\") pod \"image-registry-66df7c8f76-st7dd\" (UID: \"ea44bdfa-858a-4de7-ba77-44623dd98ae5\") " pod="openshift-image-registry/image-registry-66df7c8f76-st7dd" Nov 26 00:14:27 crc kubenswrapper[4875]: I1126 00:14:27.476214 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ggqkf\" (UniqueName: \"kubernetes.io/projected/ea44bdfa-858a-4de7-ba77-44623dd98ae5-kube-api-access-ggqkf\") pod \"image-registry-66df7c8f76-st7dd\" (UID: \"ea44bdfa-858a-4de7-ba77-44623dd98ae5\") " pod="openshift-image-registry/image-registry-66df7c8f76-st7dd" Nov 26 00:14:27 crc kubenswrapper[4875]: I1126 00:14:27.476243 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ea44bdfa-858a-4de7-ba77-44623dd98ae5-trusted-ca\") pod \"image-registry-66df7c8f76-st7dd\" (UID: \"ea44bdfa-858a-4de7-ba77-44623dd98ae5\") " pod="openshift-image-registry/image-registry-66df7c8f76-st7dd" Nov 26 00:14:27 crc kubenswrapper[4875]: I1126 00:14:27.476267 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/ea44bdfa-858a-4de7-ba77-44623dd98ae5-registry-tls\") pod \"image-registry-66df7c8f76-st7dd\" (UID: \"ea44bdfa-858a-4de7-ba77-44623dd98ae5\") " pod="openshift-image-registry/image-registry-66df7c8f76-st7dd" Nov 26 00:14:27 crc kubenswrapper[4875]: I1126 00:14:27.476314 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/ea44bdfa-858a-4de7-ba77-44623dd98ae5-installation-pull-secrets\") pod \"image-registry-66df7c8f76-st7dd\" (UID: \"ea44bdfa-858a-4de7-ba77-44623dd98ae5\") " pod="openshift-image-registry/image-registry-66df7c8f76-st7dd" Nov 26 00:14:27 crc kubenswrapper[4875]: I1126 00:14:27.476343 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"registry-certificates\" (UniqueName: 
\"kubernetes.io/configmap/ea44bdfa-858a-4de7-ba77-44623dd98ae5-registry-certificates\") pod \"image-registry-66df7c8f76-st7dd\" (UID: \"ea44bdfa-858a-4de7-ba77-44623dd98ae5\") " pod="openshift-image-registry/image-registry-66df7c8f76-st7dd" Nov 26 00:14:27 crc kubenswrapper[4875]: I1126 00:14:27.476890 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/ea44bdfa-858a-4de7-ba77-44623dd98ae5-ca-trust-extracted\") pod \"image-registry-66df7c8f76-st7dd\" (UID: \"ea44bdfa-858a-4de7-ba77-44623dd98ae5\") " pod="openshift-image-registry/image-registry-66df7c8f76-st7dd" Nov 26 00:14:27 crc kubenswrapper[4875]: I1126 00:14:27.477691 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/ea44bdfa-858a-4de7-ba77-44623dd98ae5-trusted-ca\") pod \"image-registry-66df7c8f76-st7dd\" (UID: \"ea44bdfa-858a-4de7-ba77-44623dd98ae5\") " pod="openshift-image-registry/image-registry-66df7c8f76-st7dd" Nov 26 00:14:27 crc kubenswrapper[4875]: I1126 00:14:27.477803 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/ea44bdfa-858a-4de7-ba77-44623dd98ae5-registry-certificates\") pod \"image-registry-66df7c8f76-st7dd\" (UID: \"ea44bdfa-858a-4de7-ba77-44623dd98ae5\") " pod="openshift-image-registry/image-registry-66df7c8f76-st7dd" Nov 26 00:14:27 crc kubenswrapper[4875]: I1126 00:14:27.483005 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/ea44bdfa-858a-4de7-ba77-44623dd98ae5-registry-tls\") pod \"image-registry-66df7c8f76-st7dd\" (UID: \"ea44bdfa-858a-4de7-ba77-44623dd98ae5\") " pod="openshift-image-registry/image-registry-66df7c8f76-st7dd" Nov 26 00:14:27 crc kubenswrapper[4875]: I1126 00:14:27.483125 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/ea44bdfa-858a-4de7-ba77-44623dd98ae5-installation-pull-secrets\") pod \"image-registry-66df7c8f76-st7dd\" (UID: \"ea44bdfa-858a-4de7-ba77-44623dd98ae5\") " pod="openshift-image-registry/image-registry-66df7c8f76-st7dd" Nov 26 00:14:27 crc kubenswrapper[4875]: I1126 00:14:27.492583 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/ea44bdfa-858a-4de7-ba77-44623dd98ae5-bound-sa-token\") pod \"image-registry-66df7c8f76-st7dd\" (UID: \"ea44bdfa-858a-4de7-ba77-44623dd98ae5\") " pod="openshift-image-registry/image-registry-66df7c8f76-st7dd" Nov 26 00:14:27 crc kubenswrapper[4875]: I1126 00:14:27.493474 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ggqkf\" (UniqueName: \"kubernetes.io/projected/ea44bdfa-858a-4de7-ba77-44623dd98ae5-kube-api-access-ggqkf\") pod \"image-registry-66df7c8f76-st7dd\" (UID: \"ea44bdfa-858a-4de7-ba77-44623dd98ae5\") " pod="openshift-image-registry/image-registry-66df7c8f76-st7dd" Nov 26 00:14:27 crc kubenswrapper[4875]: I1126 00:14:27.556893 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-66df7c8f76-st7dd" Nov 26 00:14:27 crc kubenswrapper[4875]: I1126 00:14:27.934209 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-image-registry/image-registry-66df7c8f76-st7dd"] Nov 26 00:14:27 crc kubenswrapper[4875]: W1126 00:14:27.941667 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podea44bdfa_858a_4de7_ba77_44623dd98ae5.slice/crio-51c6d920c54a6529fa6afb527073b963a96e116fb57459595c5c7e4950e5262b WatchSource:0}: Error finding container 51c6d920c54a6529fa6afb527073b963a96e116fb57459595c5c7e4950e5262b: Status 404 returned error can't find the container with id 51c6d920c54a6529fa6afb527073b963a96e116fb57459595c5c7e4950e5262b Nov 26 00:14:27 crc kubenswrapper[4875]: I1126 00:14:27.990141 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-st7dd" event={"ID":"ea44bdfa-858a-4de7-ba77-44623dd98ae5","Type":"ContainerStarted","Data":"51c6d920c54a6529fa6afb527073b963a96e116fb57459595c5c7e4950e5262b"} Nov 26 00:14:28 crc kubenswrapper[4875]: I1126 00:14:28.996640 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-66df7c8f76-st7dd" event={"ID":"ea44bdfa-858a-4de7-ba77-44623dd98ae5","Type":"ContainerStarted","Data":"a3b24b495981750f41068cf63122152120c9652ca60004082ba5f5ba0ba2d17d"} Nov 26 00:14:28 crc kubenswrapper[4875]: I1126 00:14:28.996999 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-image-registry/image-registry-66df7c8f76-st7dd" Nov 26 00:14:29 crc kubenswrapper[4875]: I1126 00:14:29.027211 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-image-registry/image-registry-66df7c8f76-st7dd" podStartSLOduration=2.027190133 podStartE2EDuration="2.027190133s" podCreationTimestamp="2025-11-26 00:14:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:14:29.016603003 +0000 UTC m=+442.206578698" watchObservedRunningTime="2025-11-26 00:14:29.027190133 +0000 UTC m=+442.217165828" Nov 26 00:14:31 crc kubenswrapper[4875]: E1126 00:14:31.962900 4875 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d4fb5a7_3ade_467a_9640_d744b1ef0c8e.slice/crio-conmon-1e834a56eabb3c55e61cc7d226e09bfcf80d0068e3d0752e0aeac7fbc0a28a44.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d4fb5a7_3ade_467a_9640_d744b1ef0c8e.slice/crio-1e834a56eabb3c55e61cc7d226e09bfcf80d0068e3d0752e0aeac7fbc0a28a44.scope\": RecentStats: unable to find data in memory cache]" Nov 26 00:14:42 crc kubenswrapper[4875]: E1126 00:14:42.094308 4875 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d4fb5a7_3ade_467a_9640_d744b1ef0c8e.slice/crio-conmon-1e834a56eabb3c55e61cc7d226e09bfcf80d0068e3d0752e0aeac7fbc0a28a44.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d4fb5a7_3ade_467a_9640_d744b1ef0c8e.slice/crio-1e834a56eabb3c55e61cc7d226e09bfcf80d0068e3d0752e0aeac7fbc0a28a44.scope\": RecentStats: unable to find 
data in memory cache]" Nov 26 00:14:47 crc kubenswrapper[4875]: I1126 00:14:47.562623 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-image-registry/image-registry-66df7c8f76-st7dd" Nov 26 00:14:47 crc kubenswrapper[4875]: I1126 00:14:47.621782 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-8pktj"] Nov 26 00:14:52 crc kubenswrapper[4875]: E1126 00:14:52.209986 4875 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d4fb5a7_3ade_467a_9640_d744b1ef0c8e.slice/crio-conmon-1e834a56eabb3c55e61cc7d226e09bfcf80d0068e3d0752e0aeac7fbc0a28a44.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d4fb5a7_3ade_467a_9640_d744b1ef0c8e.slice/crio-1e834a56eabb3c55e61cc7d226e09bfcf80d0068e3d0752e0aeac7fbc0a28a44.scope\": RecentStats: unable to find data in memory cache]" Nov 26 00:15:00 crc kubenswrapper[4875]: I1126 00:15:00.135062 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401935-c8dj4"] Nov 26 00:15:00 crc kubenswrapper[4875]: I1126 00:15:00.136407 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401935-c8dj4" Nov 26 00:15:00 crc kubenswrapper[4875]: I1126 00:15:00.138716 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 00:15:00 crc kubenswrapper[4875]: I1126 00:15:00.138816 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 00:15:00 crc kubenswrapper[4875]: I1126 00:15:00.142176 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401935-c8dj4"] Nov 26 00:15:00 crc kubenswrapper[4875]: I1126 00:15:00.222124 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b9068322-9c4a-4726-92a1-0377774c37f3-secret-volume\") pod \"collect-profiles-29401935-c8dj4\" (UID: \"b9068322-9c4a-4726-92a1-0377774c37f3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401935-c8dj4" Nov 26 00:15:00 crc kubenswrapper[4875]: I1126 00:15:00.222224 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b9068322-9c4a-4726-92a1-0377774c37f3-config-volume\") pod \"collect-profiles-29401935-c8dj4\" (UID: \"b9068322-9c4a-4726-92a1-0377774c37f3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401935-c8dj4" Nov 26 00:15:00 crc kubenswrapper[4875]: I1126 00:15:00.222288 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lw6gj\" (UniqueName: \"kubernetes.io/projected/b9068322-9c4a-4726-92a1-0377774c37f3-kube-api-access-lw6gj\") pod \"collect-profiles-29401935-c8dj4\" (UID: \"b9068322-9c4a-4726-92a1-0377774c37f3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401935-c8dj4" Nov 26 00:15:00 crc kubenswrapper[4875]: I1126 00:15:00.334020 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"secret-volume\" (UniqueName: \"kubernetes.io/secret/b9068322-9c4a-4726-92a1-0377774c37f3-secret-volume\") pod \"collect-profiles-29401935-c8dj4\" (UID: \"b9068322-9c4a-4726-92a1-0377774c37f3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401935-c8dj4" Nov 26 00:15:00 crc kubenswrapper[4875]: I1126 00:15:00.334188 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b9068322-9c4a-4726-92a1-0377774c37f3-config-volume\") pod \"collect-profiles-29401935-c8dj4\" (UID: \"b9068322-9c4a-4726-92a1-0377774c37f3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401935-c8dj4" Nov 26 00:15:00 crc kubenswrapper[4875]: I1126 00:15:00.334314 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lw6gj\" (UniqueName: \"kubernetes.io/projected/b9068322-9c4a-4726-92a1-0377774c37f3-kube-api-access-lw6gj\") pod \"collect-profiles-29401935-c8dj4\" (UID: \"b9068322-9c4a-4726-92a1-0377774c37f3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401935-c8dj4" Nov 26 00:15:00 crc kubenswrapper[4875]: I1126 00:15:00.337094 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b9068322-9c4a-4726-92a1-0377774c37f3-config-volume\") pod \"collect-profiles-29401935-c8dj4\" (UID: \"b9068322-9c4a-4726-92a1-0377774c37f3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401935-c8dj4" Nov 26 00:15:00 crc kubenswrapper[4875]: I1126 00:15:00.341232 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b9068322-9c4a-4726-92a1-0377774c37f3-secret-volume\") pod \"collect-profiles-29401935-c8dj4\" (UID: \"b9068322-9c4a-4726-92a1-0377774c37f3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401935-c8dj4" Nov 26 00:15:00 crc kubenswrapper[4875]: I1126 00:15:00.350803 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lw6gj\" (UniqueName: \"kubernetes.io/projected/b9068322-9c4a-4726-92a1-0377774c37f3-kube-api-access-lw6gj\") pod \"collect-profiles-29401935-c8dj4\" (UID: \"b9068322-9c4a-4726-92a1-0377774c37f3\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401935-c8dj4" Nov 26 00:15:00 crc kubenswrapper[4875]: I1126 00:15:00.456569 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401935-c8dj4" Nov 26 00:15:00 crc kubenswrapper[4875]: I1126 00:15:00.653535 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401935-c8dj4"] Nov 26 00:15:01 crc kubenswrapper[4875]: I1126 00:15:01.174797 4875 generic.go:334] "Generic (PLEG): container finished" podID="b9068322-9c4a-4726-92a1-0377774c37f3" containerID="eebde6651a927177a962c36a387def18d4004afa4998e9245ae1dc052fa367e6" exitCode=0 Nov 26 00:15:01 crc kubenswrapper[4875]: I1126 00:15:01.174865 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401935-c8dj4" event={"ID":"b9068322-9c4a-4726-92a1-0377774c37f3","Type":"ContainerDied","Data":"eebde6651a927177a962c36a387def18d4004afa4998e9245ae1dc052fa367e6"} Nov 26 00:15:01 crc kubenswrapper[4875]: I1126 00:15:01.175050 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401935-c8dj4" event={"ID":"b9068322-9c4a-4726-92a1-0377774c37f3","Type":"ContainerStarted","Data":"d940b45ef6510bb7f3697319e563b2861e3b7a0b8548cfb0aa64d0198b0cc2cf"} Nov 26 00:15:02 crc kubenswrapper[4875]: E1126 00:15:02.364638 4875 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d4fb5a7_3ade_467a_9640_d744b1ef0c8e.slice/crio-conmon-1e834a56eabb3c55e61cc7d226e09bfcf80d0068e3d0752e0aeac7fbc0a28a44.scope\": RecentStats: unable to find data in memory cache], [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d4fb5a7_3ade_467a_9640_d744b1ef0c8e.slice/crio-1e834a56eabb3c55e61cc7d226e09bfcf80d0068e3d0752e0aeac7fbc0a28a44.scope\": RecentStats: unable to find data in memory cache]" Nov 26 00:15:02 crc kubenswrapper[4875]: I1126 00:15:02.420788 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401935-c8dj4" Nov 26 00:15:02 crc kubenswrapper[4875]: I1126 00:15:02.561790 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b9068322-9c4a-4726-92a1-0377774c37f3-secret-volume\") pod \"b9068322-9c4a-4726-92a1-0377774c37f3\" (UID: \"b9068322-9c4a-4726-92a1-0377774c37f3\") " Nov 26 00:15:02 crc kubenswrapper[4875]: I1126 00:15:02.561946 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b9068322-9c4a-4726-92a1-0377774c37f3-config-volume\") pod \"b9068322-9c4a-4726-92a1-0377774c37f3\" (UID: \"b9068322-9c4a-4726-92a1-0377774c37f3\") " Nov 26 00:15:02 crc kubenswrapper[4875]: I1126 00:15:02.561990 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lw6gj\" (UniqueName: \"kubernetes.io/projected/b9068322-9c4a-4726-92a1-0377774c37f3-kube-api-access-lw6gj\") pod \"b9068322-9c4a-4726-92a1-0377774c37f3\" (UID: \"b9068322-9c4a-4726-92a1-0377774c37f3\") " Nov 26 00:15:02 crc kubenswrapper[4875]: I1126 00:15:02.563112 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/b9068322-9c4a-4726-92a1-0377774c37f3-config-volume" (OuterVolumeSpecName: "config-volume") pod "b9068322-9c4a-4726-92a1-0377774c37f3" (UID: "b9068322-9c4a-4726-92a1-0377774c37f3"). 
InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:15:02 crc kubenswrapper[4875]: I1126 00:15:02.568071 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/b9068322-9c4a-4726-92a1-0377774c37f3-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "b9068322-9c4a-4726-92a1-0377774c37f3" (UID: "b9068322-9c4a-4726-92a1-0377774c37f3"). InnerVolumeSpecName "secret-volume". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:15:02 crc kubenswrapper[4875]: I1126 00:15:02.568131 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b9068322-9c4a-4726-92a1-0377774c37f3-kube-api-access-lw6gj" (OuterVolumeSpecName: "kube-api-access-lw6gj") pod "b9068322-9c4a-4726-92a1-0377774c37f3" (UID: "b9068322-9c4a-4726-92a1-0377774c37f3"). InnerVolumeSpecName "kube-api-access-lw6gj". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:15:02 crc kubenswrapper[4875]: I1126 00:15:02.663392 4875 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/b9068322-9c4a-4726-92a1-0377774c37f3-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 00:15:02 crc kubenswrapper[4875]: I1126 00:15:02.663461 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lw6gj\" (UniqueName: \"kubernetes.io/projected/b9068322-9c4a-4726-92a1-0377774c37f3-kube-api-access-lw6gj\") on node \"crc\" DevicePath \"\"" Nov 26 00:15:02 crc kubenswrapper[4875]: I1126 00:15:02.663475 4875 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/b9068322-9c4a-4726-92a1-0377774c37f3-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 00:15:03 crc kubenswrapper[4875]: I1126 00:15:03.195339 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401935-c8dj4" event={"ID":"b9068322-9c4a-4726-92a1-0377774c37f3","Type":"ContainerDied","Data":"d940b45ef6510bb7f3697319e563b2861e3b7a0b8548cfb0aa64d0198b0cc2cf"} Nov 26 00:15:03 crc kubenswrapper[4875]: I1126 00:15:03.195383 4875 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="d940b45ef6510bb7f3697319e563b2861e3b7a0b8548cfb0aa64d0198b0cc2cf" Nov 26 00:15:03 crc kubenswrapper[4875]: I1126 00:15:03.195451 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401935-c8dj4" Nov 26 00:15:12 crc kubenswrapper[4875]: I1126 00:15:12.663592 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" podUID="35ee6c04-3028-4b06-ac2b-6a26a2f2e147" containerName="registry" containerID="cri-o://8b4779696827ab0a776ceda15157ee83bc8b91ae42560ffa548e3cbfc8c2e5e3" gracePeriod=30 Nov 26 00:15:13 crc kubenswrapper[4875]: I1126 00:15:13.017111 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:15:13 crc kubenswrapper[4875]: I1126 00:15:13.098968 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/35ee6c04-3028-4b06-ac2b-6a26a2f2e147-ca-trust-extracted\") pod \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " Nov 26 00:15:13 crc kubenswrapper[4875]: I1126 00:15:13.099018 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/35ee6c04-3028-4b06-ac2b-6a26a2f2e147-trusted-ca\") pod \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " Nov 26 00:15:13 crc kubenswrapper[4875]: I1126 00:15:13.099081 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/35ee6c04-3028-4b06-ac2b-6a26a2f2e147-bound-sa-token\") pod \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " Nov 26 00:15:13 crc kubenswrapper[4875]: I1126 00:15:13.099121 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/35ee6c04-3028-4b06-ac2b-6a26a2f2e147-registry-certificates\") pod \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " Nov 26 00:15:13 crc kubenswrapper[4875]: I1126 00:15:13.099139 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/35ee6c04-3028-4b06-ac2b-6a26a2f2e147-registry-tls\") pod \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " Nov 26 00:15:13 crc kubenswrapper[4875]: I1126 00:15:13.099176 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/35ee6c04-3028-4b06-ac2b-6a26a2f2e147-installation-pull-secrets\") pod \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " Nov 26 00:15:13 crc kubenswrapper[4875]: I1126 00:15:13.099194 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-58bm2\" (UniqueName: \"kubernetes.io/projected/35ee6c04-3028-4b06-ac2b-6a26a2f2e147-kube-api-access-58bm2\") pod \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " Nov 26 00:15:13 crc kubenswrapper[4875]: I1126 00:15:13.099398 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"registry-storage\" (UniqueName: \"kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8\") pod \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\" (UID: \"35ee6c04-3028-4b06-ac2b-6a26a2f2e147\") " Nov 26 00:15:13 crc kubenswrapper[4875]: I1126 00:15:13.100261 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/35ee6c04-3028-4b06-ac2b-6a26a2f2e147-trusted-ca" (OuterVolumeSpecName: "trusted-ca") pod "35ee6c04-3028-4b06-ac2b-6a26a2f2e147" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147"). InnerVolumeSpecName "trusted-ca". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:15:13 crc kubenswrapper[4875]: I1126 00:15:13.101330 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/35ee6c04-3028-4b06-ac2b-6a26a2f2e147-registry-certificates" (OuterVolumeSpecName: "registry-certificates") pod "35ee6c04-3028-4b06-ac2b-6a26a2f2e147" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147"). InnerVolumeSpecName "registry-certificates". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:15:13 crc kubenswrapper[4875]: I1126 00:15:13.101554 4875 reconciler_common.go:293] "Volume detached for volume \"trusted-ca\" (UniqueName: \"kubernetes.io/configmap/35ee6c04-3028-4b06-ac2b-6a26a2f2e147-trusted-ca\") on node \"crc\" DevicePath \"\"" Nov 26 00:15:13 crc kubenswrapper[4875]: I1126 00:15:13.105139 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/35ee6c04-3028-4b06-ac2b-6a26a2f2e147-installation-pull-secrets" (OuterVolumeSpecName: "installation-pull-secrets") pod "35ee6c04-3028-4b06-ac2b-6a26a2f2e147" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147"). InnerVolumeSpecName "installation-pull-secrets". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:15:13 crc kubenswrapper[4875]: I1126 00:15:13.105934 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/35ee6c04-3028-4b06-ac2b-6a26a2f2e147-bound-sa-token" (OuterVolumeSpecName: "bound-sa-token") pod "35ee6c04-3028-4b06-ac2b-6a26a2f2e147" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147"). InnerVolumeSpecName "bound-sa-token". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:15:13 crc kubenswrapper[4875]: I1126 00:15:13.105976 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/35ee6c04-3028-4b06-ac2b-6a26a2f2e147-kube-api-access-58bm2" (OuterVolumeSpecName: "kube-api-access-58bm2") pod "35ee6c04-3028-4b06-ac2b-6a26a2f2e147" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147"). InnerVolumeSpecName "kube-api-access-58bm2". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:15:13 crc kubenswrapper[4875]: I1126 00:15:13.106886 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/35ee6c04-3028-4b06-ac2b-6a26a2f2e147-registry-tls" (OuterVolumeSpecName: "registry-tls") pod "35ee6c04-3028-4b06-ac2b-6a26a2f2e147" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147"). InnerVolumeSpecName "registry-tls". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:15:13 crc kubenswrapper[4875]: I1126 00:15:13.110839 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/csi/kubevirt.io.hostpath-provisioner^pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8" (OuterVolumeSpecName: "registry-storage") pod "35ee6c04-3028-4b06-ac2b-6a26a2f2e147" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147"). InnerVolumeSpecName "pvc-657094db-63f1-4ba8-9a24-edca0e80b7a8". PluginName "kubernetes.io/csi", VolumeGidValue "" Nov 26 00:15:13 crc kubenswrapper[4875]: I1126 00:15:13.116279 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/35ee6c04-3028-4b06-ac2b-6a26a2f2e147-ca-trust-extracted" (OuterVolumeSpecName: "ca-trust-extracted") pod "35ee6c04-3028-4b06-ac2b-6a26a2f2e147" (UID: "35ee6c04-3028-4b06-ac2b-6a26a2f2e147"). InnerVolumeSpecName "ca-trust-extracted". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:15:13 crc kubenswrapper[4875]: I1126 00:15:13.202696 4875 reconciler_common.go:293] "Volume detached for volume \"ca-trust-extracted\" (UniqueName: \"kubernetes.io/empty-dir/35ee6c04-3028-4b06-ac2b-6a26a2f2e147-ca-trust-extracted\") on node \"crc\" DevicePath \"\"" Nov 26 00:15:13 crc kubenswrapper[4875]: I1126 00:15:13.202747 4875 reconciler_common.go:293] "Volume detached for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/35ee6c04-3028-4b06-ac2b-6a26a2f2e147-bound-sa-token\") on node \"crc\" DevicePath \"\"" Nov 26 00:15:13 crc kubenswrapper[4875]: I1126 00:15:13.202760 4875 reconciler_common.go:293] "Volume detached for volume \"registry-certificates\" (UniqueName: \"kubernetes.io/configmap/35ee6c04-3028-4b06-ac2b-6a26a2f2e147-registry-certificates\") on node \"crc\" DevicePath \"\"" Nov 26 00:15:13 crc kubenswrapper[4875]: I1126 00:15:13.202776 4875 reconciler_common.go:293] "Volume detached for volume \"registry-tls\" (UniqueName: \"kubernetes.io/projected/35ee6c04-3028-4b06-ac2b-6a26a2f2e147-registry-tls\") on node \"crc\" DevicePath \"\"" Nov 26 00:15:13 crc kubenswrapper[4875]: I1126 00:15:13.202789 4875 reconciler_common.go:293] "Volume detached for volume \"installation-pull-secrets\" (UniqueName: \"kubernetes.io/secret/35ee6c04-3028-4b06-ac2b-6a26a2f2e147-installation-pull-secrets\") on node \"crc\" DevicePath \"\"" Nov 26 00:15:13 crc kubenswrapper[4875]: I1126 00:15:13.202799 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-58bm2\" (UniqueName: \"kubernetes.io/projected/35ee6c04-3028-4b06-ac2b-6a26a2f2e147-kube-api-access-58bm2\") on node \"crc\" DevicePath \"\"" Nov 26 00:15:13 crc kubenswrapper[4875]: I1126 00:15:13.245695 4875 generic.go:334] "Generic (PLEG): container finished" podID="35ee6c04-3028-4b06-ac2b-6a26a2f2e147" containerID="8b4779696827ab0a776ceda15157ee83bc8b91ae42560ffa548e3cbfc8c2e5e3" exitCode=0 Nov 26 00:15:13 crc kubenswrapper[4875]: I1126 00:15:13.245767 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" event={"ID":"35ee6c04-3028-4b06-ac2b-6a26a2f2e147","Type":"ContainerDied","Data":"8b4779696827ab0a776ceda15157ee83bc8b91ae42560ffa548e3cbfc8c2e5e3"} Nov 26 00:15:13 crc kubenswrapper[4875]: I1126 00:15:13.245816 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" event={"ID":"35ee6c04-3028-4b06-ac2b-6a26a2f2e147","Type":"ContainerDied","Data":"2bbe32ed8153918f703d154ed3cf2248360d1f0df5390bc25c0a9c50b1c18f5b"} Nov 26 00:15:13 crc kubenswrapper[4875]: I1126 00:15:13.245836 4875 scope.go:117] "RemoveContainer" containerID="8b4779696827ab0a776ceda15157ee83bc8b91ae42560ffa548e3cbfc8c2e5e3" Nov 26 00:15:13 crc kubenswrapper[4875]: I1126 00:15:13.246486 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-image-registry/image-registry-697d97f7c8-8pktj" Nov 26 00:15:13 crc kubenswrapper[4875]: I1126 00:15:13.264344 4875 scope.go:117] "RemoveContainer" containerID="8b4779696827ab0a776ceda15157ee83bc8b91ae42560ffa548e3cbfc8c2e5e3" Nov 26 00:15:13 crc kubenswrapper[4875]: E1126 00:15:13.264943 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"8b4779696827ab0a776ceda15157ee83bc8b91ae42560ffa548e3cbfc8c2e5e3\": container with ID starting with 8b4779696827ab0a776ceda15157ee83bc8b91ae42560ffa548e3cbfc8c2e5e3 not found: ID does not exist" containerID="8b4779696827ab0a776ceda15157ee83bc8b91ae42560ffa548e3cbfc8c2e5e3" Nov 26 00:15:13 crc kubenswrapper[4875]: I1126 00:15:13.264983 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"8b4779696827ab0a776ceda15157ee83bc8b91ae42560ffa548e3cbfc8c2e5e3"} err="failed to get container status \"8b4779696827ab0a776ceda15157ee83bc8b91ae42560ffa548e3cbfc8c2e5e3\": rpc error: code = NotFound desc = could not find container \"8b4779696827ab0a776ceda15157ee83bc8b91ae42560ffa548e3cbfc8c2e5e3\": container with ID starting with 8b4779696827ab0a776ceda15157ee83bc8b91ae42560ffa548e3cbfc8c2e5e3 not found: ID does not exist" Nov 26 00:15:13 crc kubenswrapper[4875]: I1126 00:15:13.283596 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-8pktj"] Nov 26 00:15:13 crc kubenswrapper[4875]: I1126 00:15:13.286714 4875 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-image-registry/image-registry-697d97f7c8-8pktj"] Nov 26 00:15:13 crc kubenswrapper[4875]: I1126 00:15:13.539445 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="35ee6c04-3028-4b06-ac2b-6a26a2f2e147" path="/var/lib/kubelet/pods/35ee6c04-3028-4b06-ac2b-6a26a2f2e147/volumes" Nov 26 00:16:07 crc kubenswrapper[4875]: I1126 00:16:07.511924 4875 patch_prober.go:28] interesting pod/machine-config-daemon-9mztb container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 00:16:07 crc kubenswrapper[4875]: I1126 00:16:07.512973 4875 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 00:16:07 crc kubenswrapper[4875]: I1126 00:16:07.623907 4875 scope.go:117] "RemoveContainer" containerID="cbb25ab15effa2ab791d946e8c4ac53b8d56715dcaf3c72ea6fb04d3f88674c9" Nov 26 00:16:37 crc kubenswrapper[4875]: I1126 00:16:37.511739 4875 patch_prober.go:28] interesting pod/machine-config-daemon-9mztb container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 00:16:37 crc kubenswrapper[4875]: I1126 00:16:37.512243 4875 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" containerName="machine-config-daemon" probeResult="failure" output="Get 
\"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 00:17:07 crc kubenswrapper[4875]: I1126 00:17:07.511865 4875 patch_prober.go:28] interesting pod/machine-config-daemon-9mztb container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 00:17:07 crc kubenswrapper[4875]: I1126 00:17:07.512532 4875 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 00:17:07 crc kubenswrapper[4875]: I1126 00:17:07.512579 4875 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" Nov 26 00:17:07 crc kubenswrapper[4875]: I1126 00:17:07.513173 4875 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"c8a7242997bc14a8e06319ccddca75cbab2251128f6918fc296f21726f9be284"} pod="openshift-machine-config-operator/machine-config-daemon-9mztb" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 00:17:07 crc kubenswrapper[4875]: I1126 00:17:07.513225 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" containerName="machine-config-daemon" containerID="cri-o://c8a7242997bc14a8e06319ccddca75cbab2251128f6918fc296f21726f9be284" gracePeriod=600 Nov 26 00:17:07 crc kubenswrapper[4875]: I1126 00:17:07.828889 4875 generic.go:334] "Generic (PLEG): container finished" podID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" containerID="c8a7242997bc14a8e06319ccddca75cbab2251128f6918fc296f21726f9be284" exitCode=0 Nov 26 00:17:07 crc kubenswrapper[4875]: I1126 00:17:07.828955 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" event={"ID":"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e","Type":"ContainerDied","Data":"c8a7242997bc14a8e06319ccddca75cbab2251128f6918fc296f21726f9be284"} Nov 26 00:17:07 crc kubenswrapper[4875]: I1126 00:17:07.829251 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" event={"ID":"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e","Type":"ContainerStarted","Data":"4a7640d4f785bf9d0f293ee193979132e8a131e63ea85ce86ad12662c6293db6"} Nov 26 00:17:07 crc kubenswrapper[4875]: I1126 00:17:07.829284 4875 scope.go:117] "RemoveContainer" containerID="1e834a56eabb3c55e61cc7d226e09bfcf80d0068e3d0752e0aeac7fbc0a28a44" Nov 26 00:17:13 crc kubenswrapper[4875]: E1126 00:17:13.844509 4875 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d4fb5a7_3ade_467a_9640_d744b1ef0c8e.slice/crio-conmon-c8a7242997bc14a8e06319ccddca75cbab2251128f6918fc296f21726f9be284.scope\": RecentStats: unable to find data in memory cache]" Nov 26 00:17:23 crc kubenswrapper[4875]: E1126 00:17:23.948829 4875 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" 
err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d4fb5a7_3ade_467a_9640_d744b1ef0c8e.slice/crio-conmon-c8a7242997bc14a8e06319ccddca75cbab2251128f6918fc296f21726f9be284.scope\": RecentStats: unable to find data in memory cache]" Nov 26 00:17:34 crc kubenswrapper[4875]: E1126 00:17:34.056889 4875 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d4fb5a7_3ade_467a_9640_d744b1ef0c8e.slice/crio-conmon-c8a7242997bc14a8e06319ccddca75cbab2251128f6918fc296f21726f9be284.scope\": RecentStats: unable to find data in memory cache]" Nov 26 00:17:44 crc kubenswrapper[4875]: E1126 00:17:44.225213 4875 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d4fb5a7_3ade_467a_9640_d744b1ef0c8e.slice/crio-conmon-c8a7242997bc14a8e06319ccddca75cbab2251128f6918fc296f21726f9be284.scope\": RecentStats: unable to find data in memory cache]" Nov 26 00:17:54 crc kubenswrapper[4875]: E1126 00:17:54.353812 4875 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d4fb5a7_3ade_467a_9640_d744b1ef0c8e.slice/crio-conmon-c8a7242997bc14a8e06319ccddca75cbab2251128f6918fc296f21726f9be284.scope\": RecentStats: unable to find data in memory cache]" Nov 26 00:18:04 crc kubenswrapper[4875]: E1126 00:18:04.479800 4875 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d4fb5a7_3ade_467a_9640_d744b1ef0c8e.slice/crio-conmon-c8a7242997bc14a8e06319ccddca75cbab2251128f6918fc296f21726f9be284.scope\": RecentStats: unable to find data in memory cache]" Nov 26 00:18:07 crc kubenswrapper[4875]: I1126 00:18:07.671689 4875 scope.go:117] "RemoveContainer" containerID="de1432f0ebf79167ad37606833d991e09691bcb81997d1d682456c7ae2b6c30a" Nov 26 00:18:07 crc kubenswrapper[4875]: I1126 00:18:07.704848 4875 scope.go:117] "RemoveContainer" containerID="ce17bb87c3b5c1763171fda89918725c6c53911abdf43b4e5aff64923ddc9810" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.498713 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-hh8lx"] Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.499592 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="ovn-controller" containerID="cri-o://4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826" gracePeriod=30 Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.499774 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="sbdb" containerID="cri-o://2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14" gracePeriod=30 Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.499826 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="kube-rbac-proxy-node" 
containerID="cri-o://9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e" gracePeriod=30 Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.499857 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="ovn-acl-logging" containerID="cri-o://7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886" gracePeriod=30 Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.499875 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="northd" containerID="cri-o://7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84" gracePeriod=30 Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.499887 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="kube-rbac-proxy-ovn-metrics" containerID="cri-o://a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2" gracePeriod=30 Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.499900 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="nbdb" containerID="cri-o://2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a" gracePeriod=30 Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.541017 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="ovnkube-controller" containerID="cri-o://d43a2eaad6ec6c57703a6f096ce5a496f8cc45abb1d78a009745a72b9b9bf492" gracePeriod=30 Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.845654 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hh8lx_8fe08e0b-f339-4beb-953e-6e0180c6c945/ovnkube-controller/3.log" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.847694 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hh8lx_8fe08e0b-f339-4beb-953e-6e0180c6c945/ovn-acl-logging/0.log" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.848212 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hh8lx_8fe08e0b-f339-4beb-953e-6e0180c6c945/ovn-controller/0.log" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.848626 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.898314 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-l6jtc"] Nov 26 00:18:44 crc kubenswrapper[4875]: E1126 00:18:44.898522 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="kube-rbac-proxy-node" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.898534 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="kube-rbac-proxy-node" Nov 26 00:18:44 crc kubenswrapper[4875]: E1126 00:18:44.898542 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="kube-rbac-proxy-ovn-metrics" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.898548 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="kube-rbac-proxy-ovn-metrics" Nov 26 00:18:44 crc kubenswrapper[4875]: E1126 00:18:44.898557 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="b9068322-9c4a-4726-92a1-0377774c37f3" containerName="collect-profiles" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.898563 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="b9068322-9c4a-4726-92a1-0377774c37f3" containerName="collect-profiles" Nov 26 00:18:44 crc kubenswrapper[4875]: E1126 00:18:44.898575 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="kubecfg-setup" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.898582 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="kubecfg-setup" Nov 26 00:18:44 crc kubenswrapper[4875]: E1126 00:18:44.898588 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="ovn-controller" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.898593 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="ovn-controller" Nov 26 00:18:44 crc kubenswrapper[4875]: E1126 00:18:44.898600 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="35ee6c04-3028-4b06-ac2b-6a26a2f2e147" containerName="registry" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.898605 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="35ee6c04-3028-4b06-ac2b-6a26a2f2e147" containerName="registry" Nov 26 00:18:44 crc kubenswrapper[4875]: E1126 00:18:44.898614 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="ovnkube-controller" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.898619 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="ovnkube-controller" Nov 26 00:18:44 crc kubenswrapper[4875]: E1126 00:18:44.898627 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="ovnkube-controller" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.898634 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="ovnkube-controller" Nov 26 00:18:44 crc kubenswrapper[4875]: E1126 00:18:44.898642 4875 cpu_manager.go:410] "RemoveStaleState: 
removing container" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="sbdb" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.898647 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="sbdb" Nov 26 00:18:44 crc kubenswrapper[4875]: E1126 00:18:44.898656 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="ovnkube-controller" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.898662 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="ovnkube-controller" Nov 26 00:18:44 crc kubenswrapper[4875]: E1126 00:18:44.898672 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="ovnkube-controller" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.898678 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="ovnkube-controller" Nov 26 00:18:44 crc kubenswrapper[4875]: E1126 00:18:44.898686 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="ovn-acl-logging" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.898693 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="ovn-acl-logging" Nov 26 00:18:44 crc kubenswrapper[4875]: E1126 00:18:44.898702 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="northd" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.898709 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="northd" Nov 26 00:18:44 crc kubenswrapper[4875]: E1126 00:18:44.898716 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="nbdb" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.898723 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="nbdb" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.898807 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="ovnkube-controller" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.898816 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="b9068322-9c4a-4726-92a1-0377774c37f3" containerName="collect-profiles" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.898822 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="ovnkube-controller" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.898828 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="kube-rbac-proxy-ovn-metrics" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.898836 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="ovnkube-controller" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.898843 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="ovnkube-controller" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.898852 4875 
memory_manager.go:354] "RemoveStaleState removing state" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="sbdb" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.898858 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="northd" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.898865 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="ovn-controller" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.898873 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="ovn-acl-logging" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.898880 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="35ee6c04-3028-4b06-ac2b-6a26a2f2e147" containerName="registry" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.898889 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="kube-rbac-proxy-node" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.898897 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="nbdb" Nov 26 00:18:44 crc kubenswrapper[4875]: E1126 00:18:44.898976 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="ovnkube-controller" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.898982 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="ovnkube-controller" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.899060 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerName="ovnkube-controller" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.900503 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.922920 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-run-openvswitch\") pod \"8fe08e0b-f339-4beb-953e-6e0180c6c945\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.922977 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-run-systemd\") pod \"8fe08e0b-f339-4beb-953e-6e0180c6c945\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.923013 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/8fe08e0b-f339-4beb-953e-6e0180c6c945-env-overrides\") pod \"8fe08e0b-f339-4beb-953e-6e0180c6c945\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.923011 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-run-openvswitch" (OuterVolumeSpecName: "run-openvswitch") pod "8fe08e0b-f339-4beb-953e-6e0180c6c945" (UID: "8fe08e0b-f339-4beb-953e-6e0180c6c945"). 
InnerVolumeSpecName "run-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.923033 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-host-run-ovn-kubernetes\") pod \"8fe08e0b-f339-4beb-953e-6e0180c6c945\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.923078 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-host-run-ovn-kubernetes" (OuterVolumeSpecName: "host-run-ovn-kubernetes") pod "8fe08e0b-f339-4beb-953e-6e0180c6c945" (UID: "8fe08e0b-f339-4beb-953e-6e0180c6c945"). InnerVolumeSpecName "host-run-ovn-kubernetes". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.923161 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-var-lib-openvswitch\") pod \"8fe08e0b-f339-4beb-953e-6e0180c6c945\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.923198 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-log-socket\") pod \"8fe08e0b-f339-4beb-953e-6e0180c6c945\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.923240 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/8fe08e0b-f339-4beb-953e-6e0180c6c945-ovn-node-metrics-cert\") pod \"8fe08e0b-f339-4beb-953e-6e0180c6c945\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.923228 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-var-lib-openvswitch" (OuterVolumeSpecName: "var-lib-openvswitch") pod "8fe08e0b-f339-4beb-953e-6e0180c6c945" (UID: "8fe08e0b-f339-4beb-953e-6e0180c6c945"). InnerVolumeSpecName "var-lib-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.923277 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mr9lb\" (UniqueName: \"kubernetes.io/projected/8fe08e0b-f339-4beb-953e-6e0180c6c945-kube-api-access-mr9lb\") pod \"8fe08e0b-f339-4beb-953e-6e0180c6c945\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.923305 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-log-socket" (OuterVolumeSpecName: "log-socket") pod "8fe08e0b-f339-4beb-953e-6e0180c6c945" (UID: "8fe08e0b-f339-4beb-953e-6e0180c6c945"). InnerVolumeSpecName "log-socket". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.923306 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-etc-openvswitch\") pod \"8fe08e0b-f339-4beb-953e-6e0180c6c945\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.923330 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-etc-openvswitch" (OuterVolumeSpecName: "etc-openvswitch") pod "8fe08e0b-f339-4beb-953e-6e0180c6c945" (UID: "8fe08e0b-f339-4beb-953e-6e0180c6c945"). InnerVolumeSpecName "etc-openvswitch". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.923349 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-host-slash\") pod \"8fe08e0b-f339-4beb-953e-6e0180c6c945\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.923379 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-systemd-units\") pod \"8fe08e0b-f339-4beb-953e-6e0180c6c945\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.923398 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-host-var-lib-cni-networks-ovn-kubernetes\") pod \"8fe08e0b-f339-4beb-953e-6e0180c6c945\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.923456 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/8fe08e0b-f339-4beb-953e-6e0180c6c945-ovnkube-script-lib\") pod \"8fe08e0b-f339-4beb-953e-6e0180c6c945\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.923478 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-host-kubelet\") pod \"8fe08e0b-f339-4beb-953e-6e0180c6c945\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.923521 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/8fe08e0b-f339-4beb-953e-6e0180c6c945-ovnkube-config\") pod \"8fe08e0b-f339-4beb-953e-6e0180c6c945\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.923554 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-node-log\") pod \"8fe08e0b-f339-4beb-953e-6e0180c6c945\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.923585 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-netd\" (UniqueName: 
\"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-host-cni-netd\") pod \"8fe08e0b-f339-4beb-953e-6e0180c6c945\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.923613 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-host-cni-bin\") pod \"8fe08e0b-f339-4beb-953e-6e0180c6c945\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.923609 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8fe08e0b-f339-4beb-953e-6e0180c6c945-env-overrides" (OuterVolumeSpecName: "env-overrides") pod "8fe08e0b-f339-4beb-953e-6e0180c6c945" (UID: "8fe08e0b-f339-4beb-953e-6e0180c6c945"). InnerVolumeSpecName "env-overrides". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.923633 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-host-run-netns\") pod \"8fe08e0b-f339-4beb-953e-6e0180c6c945\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.923660 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-run-ovn\") pod \"8fe08e0b-f339-4beb-953e-6e0180c6c945\" (UID: \"8fe08e0b-f339-4beb-953e-6e0180c6c945\") " Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.923805 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-8nqgd\" (UniqueName: \"kubernetes.io/projected/8d2c9dcf-64cb-4d65-b161-9755122e490f-kube-api-access-8nqgd\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.923833 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8d2c9dcf-64cb-4d65-b161-9755122e490f-run-openvswitch\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.923849 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/8d2c9dcf-64cb-4d65-b161-9755122e490f-ovnkube-config\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.923866 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/8d2c9dcf-64cb-4d65-b161-9755122e490f-host-cni-netd\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.923883 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/8d2c9dcf-64cb-4d65-b161-9755122e490f-log-socket\") 
pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.923908 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/8d2c9dcf-64cb-4d65-b161-9755122e490f-systemd-units\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.923926 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/8d2c9dcf-64cb-4d65-b161-9755122e490f-run-systemd\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.923943 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/8d2c9dcf-64cb-4d65-b161-9755122e490f-node-log\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.923966 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/8d2c9dcf-64cb-4d65-b161-9755122e490f-run-ovn\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.923984 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/8d2c9dcf-64cb-4d65-b161-9755122e490f-env-overrides\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.923997 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/8d2c9dcf-64cb-4d65-b161-9755122e490f-host-cni-bin\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.924016 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/8d2c9dcf-64cb-4d65-b161-9755122e490f-ovn-node-metrics-cert\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.924039 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/8d2c9dcf-64cb-4d65-b161-9755122e490f-host-run-netns\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.924058 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-kubelet\" (UniqueName: 
\"kubernetes.io/host-path/8d2c9dcf-64cb-4d65-b161-9755122e490f-host-kubelet\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.924073 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8d2c9dcf-64cb-4d65-b161-9755122e490f-etc-openvswitch\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.924109 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8d2c9dcf-64cb-4d65-b161-9755122e490f-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.924127 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8d2c9dcf-64cb-4d65-b161-9755122e490f-host-run-ovn-kubernetes\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.924147 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/8d2c9dcf-64cb-4d65-b161-9755122e490f-host-slash\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.924163 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/8d2c9dcf-64cb-4d65-b161-9755122e490f-ovnkube-script-lib\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.924226 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8d2c9dcf-64cb-4d65-b161-9755122e490f-var-lib-openvswitch\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.924256 4875 reconciler_common.go:293] "Volume detached for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-run-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.924266 4875 reconciler_common.go:293] "Volume detached for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/8fe08e0b-f339-4beb-953e-6e0180c6c945-env-overrides\") on node \"crc\" DevicePath \"\"" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.924275 4875 reconciler_common.go:293] "Volume detached for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-host-run-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 26 00:18:44 crc 
kubenswrapper[4875]: I1126 00:18:44.924284 4875 reconciler_common.go:293] "Volume detached for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-var-lib-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.924292 4875 reconciler_common.go:293] "Volume detached for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-log-socket\") on node \"crc\" DevicePath \"\"" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.924300 4875 reconciler_common.go:293] "Volume detached for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-etc-openvswitch\") on node \"crc\" DevicePath \"\"" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.923658 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-host-kubelet" (OuterVolumeSpecName: "host-kubelet") pod "8fe08e0b-f339-4beb-953e-6e0180c6c945" (UID: "8fe08e0b-f339-4beb-953e-6e0180c6c945"). InnerVolumeSpecName "host-kubelet". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.924482 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-host-cni-bin" (OuterVolumeSpecName: "host-cni-bin") pod "8fe08e0b-f339-4beb-953e-6e0180c6c945" (UID: "8fe08e0b-f339-4beb-953e-6e0180c6c945"). InnerVolumeSpecName "host-cni-bin". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.923676 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-host-slash" (OuterVolumeSpecName: "host-slash") pod "8fe08e0b-f339-4beb-953e-6e0180c6c945" (UID: "8fe08e0b-f339-4beb-953e-6e0180c6c945"). InnerVolumeSpecName "host-slash". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.923690 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-systemd-units" (OuterVolumeSpecName: "systemd-units") pod "8fe08e0b-f339-4beb-953e-6e0180c6c945" (UID: "8fe08e0b-f339-4beb-953e-6e0180c6c945"). InnerVolumeSpecName "systemd-units". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.924504 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-host-run-netns" (OuterVolumeSpecName: "host-run-netns") pod "8fe08e0b-f339-4beb-953e-6e0180c6c945" (UID: "8fe08e0b-f339-4beb-953e-6e0180c6c945"). InnerVolumeSpecName "host-run-netns". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.923705 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-host-var-lib-cni-networks-ovn-kubernetes" (OuterVolumeSpecName: "host-var-lib-cni-networks-ovn-kubernetes") pod "8fe08e0b-f339-4beb-953e-6e0180c6c945" (UID: "8fe08e0b-f339-4beb-953e-6e0180c6c945"). InnerVolumeSpecName "host-var-lib-cni-networks-ovn-kubernetes". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.924060 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8fe08e0b-f339-4beb-953e-6e0180c6c945-ovnkube-script-lib" (OuterVolumeSpecName: "ovnkube-script-lib") pod "8fe08e0b-f339-4beb-953e-6e0180c6c945" (UID: "8fe08e0b-f339-4beb-953e-6e0180c6c945"). InnerVolumeSpecName "ovnkube-script-lib". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.924383 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-run-ovn" (OuterVolumeSpecName: "run-ovn") pod "8fe08e0b-f339-4beb-953e-6e0180c6c945" (UID: "8fe08e0b-f339-4beb-953e-6e0180c6c945"). InnerVolumeSpecName "run-ovn". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.924453 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-node-log" (OuterVolumeSpecName: "node-log") pod "8fe08e0b-f339-4beb-953e-6e0180c6c945" (UID: "8fe08e0b-f339-4beb-953e-6e0180c6c945"). InnerVolumeSpecName "node-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.924468 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-host-cni-netd" (OuterVolumeSpecName: "host-cni-netd") pod "8fe08e0b-f339-4beb-953e-6e0180c6c945" (UID: "8fe08e0b-f339-4beb-953e-6e0180c6c945"). InnerVolumeSpecName "host-cni-netd". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.924467 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8fe08e0b-f339-4beb-953e-6e0180c6c945-ovnkube-config" (OuterVolumeSpecName: "ovnkube-config") pod "8fe08e0b-f339-4beb-953e-6e0180c6c945" (UID: "8fe08e0b-f339-4beb-953e-6e0180c6c945"). InnerVolumeSpecName "ovnkube-config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.929321 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8fe08e0b-f339-4beb-953e-6e0180c6c945-ovn-node-metrics-cert" (OuterVolumeSpecName: "ovn-node-metrics-cert") pod "8fe08e0b-f339-4beb-953e-6e0180c6c945" (UID: "8fe08e0b-f339-4beb-953e-6e0180c6c945"). InnerVolumeSpecName "ovn-node-metrics-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.931539 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8fe08e0b-f339-4beb-953e-6e0180c6c945-kube-api-access-mr9lb" (OuterVolumeSpecName: "kube-api-access-mr9lb") pod "8fe08e0b-f339-4beb-953e-6e0180c6c945" (UID: "8fe08e0b-f339-4beb-953e-6e0180c6c945"). InnerVolumeSpecName "kube-api-access-mr9lb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:18:44 crc kubenswrapper[4875]: I1126 00:18:44.939277 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-run-systemd" (OuterVolumeSpecName: "run-systemd") pod "8fe08e0b-f339-4beb-953e-6e0180c6c945" (UID: "8fe08e0b-f339-4beb-953e-6e0180c6c945"). InnerVolumeSpecName "run-systemd". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.024781 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/8d2c9dcf-64cb-4d65-b161-9755122e490f-run-ovn\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.024826 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/8d2c9dcf-64cb-4d65-b161-9755122e490f-host-cni-bin\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.024843 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/8d2c9dcf-64cb-4d65-b161-9755122e490f-env-overrides\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.024861 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/8d2c9dcf-64cb-4d65-b161-9755122e490f-ovn-node-metrics-cert\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.024879 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/8d2c9dcf-64cb-4d65-b161-9755122e490f-host-run-netns\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.024894 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/8d2c9dcf-64cb-4d65-b161-9755122e490f-run-ovn\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.024919 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/8d2c9dcf-64cb-4d65-b161-9755122e490f-host-kubelet\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.024897 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/8d2c9dcf-64cb-4d65-b161-9755122e490f-host-kubelet\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.025023 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/8d2c9dcf-64cb-4d65-b161-9755122e490f-host-cni-bin\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.025044 4875 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/8d2c9dcf-64cb-4d65-b161-9755122e490f-host-run-netns\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.025112 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8d2c9dcf-64cb-4d65-b161-9755122e490f-etc-openvswitch\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.025170 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"etc-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8d2c9dcf-64cb-4d65-b161-9755122e490f-etc-openvswitch\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.025200 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8d2c9dcf-64cb-4d65-b161-9755122e490f-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.025233 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8d2c9dcf-64cb-4d65-b161-9755122e490f-host-run-ovn-kubernetes\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.025283 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/8d2c9dcf-64cb-4d65-b161-9755122e490f-host-slash\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.025299 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/8d2c9dcf-64cb-4d65-b161-9755122e490f-ovnkube-script-lib\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.025318 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8d2c9dcf-64cb-4d65-b161-9755122e490f-var-lib-openvswitch\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.025322 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8d2c9dcf-64cb-4d65-b161-9755122e490f-host-var-lib-cni-networks-ovn-kubernetes\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.025351 4875 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/8d2c9dcf-64cb-4d65-b161-9755122e490f-host-slash\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.025351 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-8nqgd\" (UniqueName: \"kubernetes.io/projected/8d2c9dcf-64cb-4d65-b161-9755122e490f-kube-api-access-8nqgd\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.025333 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-run-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8d2c9dcf-64cb-4d65-b161-9755122e490f-host-run-ovn-kubernetes\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.025368 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lib-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8d2c9dcf-64cb-4d65-b161-9755122e490f-var-lib-openvswitch\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.025528 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8d2c9dcf-64cb-4d65-b161-9755122e490f-run-openvswitch\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.025536 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"env-overrides\" (UniqueName: \"kubernetes.io/configmap/8d2c9dcf-64cb-4d65-b161-9755122e490f-env-overrides\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.025546 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/8d2c9dcf-64cb-4d65-b161-9755122e490f-ovnkube-config\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.025598 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/8d2c9dcf-64cb-4d65-b161-9755122e490f-host-cni-netd\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.025617 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-openvswitch\" (UniqueName: \"kubernetes.io/host-path/8d2c9dcf-64cb-4d65-b161-9755122e490f-run-openvswitch\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.025635 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"log-socket\" (UniqueName: 
\"kubernetes.io/host-path/8d2c9dcf-64cb-4d65-b161-9755122e490f-log-socket\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.025650 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/8d2c9dcf-64cb-4d65-b161-9755122e490f-host-cni-netd\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.025677 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"log-socket\" (UniqueName: \"kubernetes.io/host-path/8d2c9dcf-64cb-4d65-b161-9755122e490f-log-socket\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.025707 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/8d2c9dcf-64cb-4d65-b161-9755122e490f-systemd-units\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.025725 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/8d2c9dcf-64cb-4d65-b161-9755122e490f-run-systemd\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.025781 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/8d2c9dcf-64cb-4d65-b161-9755122e490f-node-log\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.025804 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/8d2c9dcf-64cb-4d65-b161-9755122e490f-node-log\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.025779 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/8d2c9dcf-64cb-4d65-b161-9755122e490f-systemd-units\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.025782 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/8d2c9dcf-64cb-4d65-b161-9755122e490f-run-systemd\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.025849 4875 reconciler_common.go:293] "Volume detached for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/8fe08e0b-f339-4beb-953e-6e0180c6c945-ovn-node-metrics-cert\") on node \"crc\" DevicePath \"\"" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.025865 4875 
reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mr9lb\" (UniqueName: \"kubernetes.io/projected/8fe08e0b-f339-4beb-953e-6e0180c6c945-kube-api-access-mr9lb\") on node \"crc\" DevicePath \"\"" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.025874 4875 reconciler_common.go:293] "Volume detached for volume \"host-slash\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-host-slash\") on node \"crc\" DevicePath \"\"" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.025885 4875 reconciler_common.go:293] "Volume detached for volume \"systemd-units\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-systemd-units\") on node \"crc\" DevicePath \"\"" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.025895 4875 reconciler_common.go:293] "Volume detached for volume \"host-var-lib-cni-networks-ovn-kubernetes\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-host-var-lib-cni-networks-ovn-kubernetes\") on node \"crc\" DevicePath \"\"" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.025907 4875 reconciler_common.go:293] "Volume detached for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/8fe08e0b-f339-4beb-953e-6e0180c6c945-ovnkube-script-lib\") on node \"crc\" DevicePath \"\"" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.025917 4875 reconciler_common.go:293] "Volume detached for volume \"host-kubelet\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-host-kubelet\") on node \"crc\" DevicePath \"\"" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.025929 4875 reconciler_common.go:293] "Volume detached for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/8fe08e0b-f339-4beb-953e-6e0180c6c945-ovnkube-config\") on node \"crc\" DevicePath \"\"" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.025931 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-script-lib\" (UniqueName: \"kubernetes.io/configmap/8d2c9dcf-64cb-4d65-b161-9755122e490f-ovnkube-script-lib\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.025939 4875 reconciler_common.go:293] "Volume detached for volume \"node-log\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-node-log\") on node \"crc\" DevicePath \"\"" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.025978 4875 reconciler_common.go:293] "Volume detached for volume \"host-cni-netd\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-host-cni-netd\") on node \"crc\" DevicePath \"\"" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.025987 4875 reconciler_common.go:293] "Volume detached for volume \"host-cni-bin\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-host-cni-bin\") on node \"crc\" DevicePath \"\"" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.025997 4875 reconciler_common.go:293] "Volume detached for volume \"host-run-netns\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-host-run-netns\") on node \"crc\" DevicePath \"\"" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.026005 4875 reconciler_common.go:293] "Volume detached for volume \"run-ovn\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-run-ovn\") on node \"crc\" DevicePath 
\"\"" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.026014 4875 reconciler_common.go:293] "Volume detached for volume \"run-systemd\" (UniqueName: \"kubernetes.io/host-path/8fe08e0b-f339-4beb-953e-6e0180c6c945-run-systemd\") on node \"crc\" DevicePath \"\"" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.026232 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovnkube-config\" (UniqueName: \"kubernetes.io/configmap/8d2c9dcf-64cb-4d65-b161-9755122e490f-ovnkube-config\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.028899 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"ovn-node-metrics-cert\" (UniqueName: \"kubernetes.io/secret/8d2c9dcf-64cb-4d65-b161-9755122e490f-ovn-node-metrics-cert\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.040080 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-8nqgd\" (UniqueName: \"kubernetes.io/projected/8d2c9dcf-64cb-4d65-b161-9755122e490f-kube-api-access-8nqgd\") pod \"ovnkube-node-l6jtc\" (UID: \"8d2c9dcf-64cb-4d65-b161-9755122e490f\") " pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.220927 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.318853 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hh8lx_8fe08e0b-f339-4beb-953e-6e0180c6c945/ovnkube-controller/3.log" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.321228 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hh8lx_8fe08e0b-f339-4beb-953e-6e0180c6c945/ovn-acl-logging/0.log" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.321758 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-ovn-kubernetes_ovnkube-node-hh8lx_8fe08e0b-f339-4beb-953e-6e0180c6c945/ovn-controller/0.log" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322115 4875 generic.go:334] "Generic (PLEG): container finished" podID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerID="d43a2eaad6ec6c57703a6f096ce5a496f8cc45abb1d78a009745a72b9b9bf492" exitCode=0 Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322147 4875 generic.go:334] "Generic (PLEG): container finished" podID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerID="2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14" exitCode=0 Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322157 4875 generic.go:334] "Generic (PLEG): container finished" podID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerID="2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a" exitCode=0 Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322152 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" event={"ID":"8fe08e0b-f339-4beb-953e-6e0180c6c945","Type":"ContainerDied","Data":"d43a2eaad6ec6c57703a6f096ce5a496f8cc45abb1d78a009745a72b9b9bf492"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322168 4875 generic.go:334] "Generic (PLEG): container finished" 
podID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerID="7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84" exitCode=0 Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322233 4875 scope.go:117] "RemoveContainer" containerID="d43a2eaad6ec6c57703a6f096ce5a496f8cc45abb1d78a009745a72b9b9bf492" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322238 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322264 4875 generic.go:334] "Generic (PLEG): container finished" podID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerID="a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2" exitCode=0 Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322219 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" event={"ID":"8fe08e0b-f339-4beb-953e-6e0180c6c945","Type":"ContainerDied","Data":"2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322308 4875 generic.go:334] "Generic (PLEG): container finished" podID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerID="9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e" exitCode=0 Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322330 4875 generic.go:334] "Generic (PLEG): container finished" podID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerID="7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886" exitCode=143 Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322336 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" event={"ID":"8fe08e0b-f339-4beb-953e-6e0180c6c945","Type":"ContainerDied","Data":"2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322351 4875 generic.go:334] "Generic (PLEG): container finished" podID="8fe08e0b-f339-4beb-953e-6e0180c6c945" containerID="4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826" exitCode=143 Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322362 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" event={"ID":"8fe08e0b-f339-4beb-953e-6e0180c6c945","Type":"ContainerDied","Data":"7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322382 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" event={"ID":"8fe08e0b-f339-4beb-953e-6e0180c6c945","Type":"ContainerDied","Data":"a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322400 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" event={"ID":"8fe08e0b-f339-4beb-953e-6e0180c6c945","Type":"ContainerDied","Data":"9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322439 4875 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2e1e37ff34e8016d35de4328475cea8cef96f21dc241d6ab0dfa8c068c38059d"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322454 4875 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322462 4875 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322469 4875 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322476 4875 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322484 4875 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322492 4875 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322499 4875 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322507 4875 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322518 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" event={"ID":"8fe08e0b-f339-4beb-953e-6e0180c6c945","Type":"ContainerDied","Data":"7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322531 4875 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d43a2eaad6ec6c57703a6f096ce5a496f8cc45abb1d78a009745a72b9b9bf492"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322540 4875 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2e1e37ff34e8016d35de4328475cea8cef96f21dc241d6ab0dfa8c068c38059d"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322548 4875 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322555 4875 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322562 4875 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322569 4875 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322576 4875 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322583 4875 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322590 4875 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322597 4875 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322607 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" event={"ID":"8fe08e0b-f339-4beb-953e-6e0180c6c945","Type":"ContainerDied","Data":"4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322618 4875 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d43a2eaad6ec6c57703a6f096ce5a496f8cc45abb1d78a009745a72b9b9bf492"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322626 4875 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2e1e37ff34e8016d35de4328475cea8cef96f21dc241d6ab0dfa8c068c38059d"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322634 4875 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322641 4875 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322648 4875 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322655 4875 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322662 4875 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322669 4875 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322678 4875 pod_container_deletor.go:114] "Failed to issue the request to remove container" 
containerID={"Type":"cri-o","ID":"4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322685 4875 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322695 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-hh8lx" event={"ID":"8fe08e0b-f339-4beb-953e-6e0180c6c945","Type":"ContainerDied","Data":"247927d0cd1075c4092cddef6dfe1a6d11cb3f6bbaacdbdd4a9afde59acfc187"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322706 4875 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d43a2eaad6ec6c57703a6f096ce5a496f8cc45abb1d78a009745a72b9b9bf492"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322715 4875 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2e1e37ff34e8016d35de4328475cea8cef96f21dc241d6ab0dfa8c068c38059d"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322721 4875 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322728 4875 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322735 4875 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322742 4875 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322749 4875 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322756 4875 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322764 4875 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.322771 4875 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.323750 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" event={"ID":"8d2c9dcf-64cb-4d65-b161-9755122e490f","Type":"ContainerStarted","Data":"31b62bffa7c0c9f141276ae9f03f0164acbceb814ed3b4dd00a5bdbd93ab5ae0"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.327105 4875 
log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8wvqp_9fd65949-6a8a-4828-baa4-1c88c71d5ed2/kube-multus/2.log" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.327559 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8wvqp_9fd65949-6a8a-4828-baa4-1c88c71d5ed2/kube-multus/1.log" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.327609 4875 generic.go:334] "Generic (PLEG): container finished" podID="9fd65949-6a8a-4828-baa4-1c88c71d5ed2" containerID="72b992d4d593659316136ff4983256fa4cb0d025d16018153f4829f1003548ac" exitCode=2 Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.327640 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-8wvqp" event={"ID":"9fd65949-6a8a-4828-baa4-1c88c71d5ed2","Type":"ContainerDied","Data":"72b992d4d593659316136ff4983256fa4cb0d025d16018153f4829f1003548ac"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.327666 4875 pod_container_deletor.go:114] "Failed to issue the request to remove container" containerID={"Type":"cri-o","ID":"8403934b57b62597733ce8e578eef53784b1e5a39eba7e4fb8765870a3690424"} Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.328879 4875 scope.go:117] "RemoveContainer" containerID="72b992d4d593659316136ff4983256fa4cb0d025d16018153f4829f1003548ac" Nov 26 00:18:45 crc kubenswrapper[4875]: E1126 00:18:45.329100 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-8wvqp_openshift-multus(9fd65949-6a8a-4828-baa4-1c88c71d5ed2)\"" pod="openshift-multus/multus-8wvqp" podUID="9fd65949-6a8a-4828-baa4-1c88c71d5ed2" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.338292 4875 scope.go:117] "RemoveContainer" containerID="2e1e37ff34e8016d35de4328475cea8cef96f21dc241d6ab0dfa8c068c38059d" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.354330 4875 scope.go:117] "RemoveContainer" containerID="2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.367887 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-hh8lx"] Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.382217 4875 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-ovn-kubernetes/ovnkube-node-hh8lx"] Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.438738 4875 scope.go:117] "RemoveContainer" containerID="2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.449134 4875 scope.go:117] "RemoveContainer" containerID="7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.459282 4875 scope.go:117] "RemoveContainer" containerID="a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.470570 4875 scope.go:117] "RemoveContainer" containerID="9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.480175 4875 scope.go:117] "RemoveContainer" containerID="7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.490640 4875 scope.go:117] "RemoveContainer" containerID="4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826" Nov 26 00:18:45 crc 
kubenswrapper[4875]: I1126 00:18:45.518029 4875 scope.go:117] "RemoveContainer" containerID="d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.528603 4875 scope.go:117] "RemoveContainer" containerID="d43a2eaad6ec6c57703a6f096ce5a496f8cc45abb1d78a009745a72b9b9bf492" Nov 26 00:18:45 crc kubenswrapper[4875]: E1126 00:18:45.532870 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d43a2eaad6ec6c57703a6f096ce5a496f8cc45abb1d78a009745a72b9b9bf492\": container with ID starting with d43a2eaad6ec6c57703a6f096ce5a496f8cc45abb1d78a009745a72b9b9bf492 not found: ID does not exist" containerID="d43a2eaad6ec6c57703a6f096ce5a496f8cc45abb1d78a009745a72b9b9bf492" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.532906 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d43a2eaad6ec6c57703a6f096ce5a496f8cc45abb1d78a009745a72b9b9bf492"} err="failed to get container status \"d43a2eaad6ec6c57703a6f096ce5a496f8cc45abb1d78a009745a72b9b9bf492\": rpc error: code = NotFound desc = could not find container \"d43a2eaad6ec6c57703a6f096ce5a496f8cc45abb1d78a009745a72b9b9bf492\": container with ID starting with d43a2eaad6ec6c57703a6f096ce5a496f8cc45abb1d78a009745a72b9b9bf492 not found: ID does not exist" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.532931 4875 scope.go:117] "RemoveContainer" containerID="2e1e37ff34e8016d35de4328475cea8cef96f21dc241d6ab0dfa8c068c38059d" Nov 26 00:18:45 crc kubenswrapper[4875]: E1126 00:18:45.533764 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2e1e37ff34e8016d35de4328475cea8cef96f21dc241d6ab0dfa8c068c38059d\": container with ID starting with 2e1e37ff34e8016d35de4328475cea8cef96f21dc241d6ab0dfa8c068c38059d not found: ID does not exist" containerID="2e1e37ff34e8016d35de4328475cea8cef96f21dc241d6ab0dfa8c068c38059d" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.533801 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2e1e37ff34e8016d35de4328475cea8cef96f21dc241d6ab0dfa8c068c38059d"} err="failed to get container status \"2e1e37ff34e8016d35de4328475cea8cef96f21dc241d6ab0dfa8c068c38059d\": rpc error: code = NotFound desc = could not find container \"2e1e37ff34e8016d35de4328475cea8cef96f21dc241d6ab0dfa8c068c38059d\": container with ID starting with 2e1e37ff34e8016d35de4328475cea8cef96f21dc241d6ab0dfa8c068c38059d not found: ID does not exist" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.533814 4875 scope.go:117] "RemoveContainer" containerID="2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14" Nov 26 00:18:45 crc kubenswrapper[4875]: E1126 00:18:45.534094 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14\": container with ID starting with 2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14 not found: ID does not exist" containerID="2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.534131 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14"} err="failed to get container status 
\"2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14\": rpc error: code = NotFound desc = could not find container \"2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14\": container with ID starting with 2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14 not found: ID does not exist" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.534234 4875 scope.go:117] "RemoveContainer" containerID="2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a" Nov 26 00:18:45 crc kubenswrapper[4875]: E1126 00:18:45.534584 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a\": container with ID starting with 2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a not found: ID does not exist" containerID="2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.534607 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a"} err="failed to get container status \"2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a\": rpc error: code = NotFound desc = could not find container \"2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a\": container with ID starting with 2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a not found: ID does not exist" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.534629 4875 scope.go:117] "RemoveContainer" containerID="7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84" Nov 26 00:18:45 crc kubenswrapper[4875]: E1126 00:18:45.534849 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84\": container with ID starting with 7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84 not found: ID does not exist" containerID="7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.534873 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84"} err="failed to get container status \"7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84\": rpc error: code = NotFound desc = could not find container \"7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84\": container with ID starting with 7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84 not found: ID does not exist" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.534887 4875 scope.go:117] "RemoveContainer" containerID="a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2" Nov 26 00:18:45 crc kubenswrapper[4875]: E1126 00:18:45.535102 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2\": container with ID starting with a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2 not found: ID does not exist" containerID="a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.535123 4875 pod_container_deletor.go:53] 
"DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2"} err="failed to get container status \"a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2\": rpc error: code = NotFound desc = could not find container \"a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2\": container with ID starting with a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2 not found: ID does not exist" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.535135 4875 scope.go:117] "RemoveContainer" containerID="9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e" Nov 26 00:18:45 crc kubenswrapper[4875]: E1126 00:18:45.535385 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e\": container with ID starting with 9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e not found: ID does not exist" containerID="9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.535440 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e"} err="failed to get container status \"9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e\": rpc error: code = NotFound desc = could not find container \"9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e\": container with ID starting with 9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e not found: ID does not exist" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.535461 4875 scope.go:117] "RemoveContainer" containerID="7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886" Nov 26 00:18:45 crc kubenswrapper[4875]: E1126 00:18:45.535753 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886\": container with ID starting with 7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886 not found: ID does not exist" containerID="7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.535781 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886"} err="failed to get container status \"7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886\": rpc error: code = NotFound desc = could not find container \"7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886\": container with ID starting with 7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886 not found: ID does not exist" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.535795 4875 scope.go:117] "RemoveContainer" containerID="4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826" Nov 26 00:18:45 crc kubenswrapper[4875]: E1126 00:18:45.536007 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826\": container with ID starting with 4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826 not found: ID does not exist" 
containerID="4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.536030 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826"} err="failed to get container status \"4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826\": rpc error: code = NotFound desc = could not find container \"4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826\": container with ID starting with 4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826 not found: ID does not exist" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.536043 4875 scope.go:117] "RemoveContainer" containerID="d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.536252 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8fe08e0b-f339-4beb-953e-6e0180c6c945" path="/var/lib/kubelet/pods/8fe08e0b-f339-4beb-953e-6e0180c6c945/volumes" Nov 26 00:18:45 crc kubenswrapper[4875]: E1126 00:18:45.536258 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\": container with ID starting with d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21 not found: ID does not exist" containerID="d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.536284 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21"} err="failed to get container status \"d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\": rpc error: code = NotFound desc = could not find container \"d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\": container with ID starting with d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21 not found: ID does not exist" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.536297 4875 scope.go:117] "RemoveContainer" containerID="d43a2eaad6ec6c57703a6f096ce5a496f8cc45abb1d78a009745a72b9b9bf492" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.536530 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d43a2eaad6ec6c57703a6f096ce5a496f8cc45abb1d78a009745a72b9b9bf492"} err="failed to get container status \"d43a2eaad6ec6c57703a6f096ce5a496f8cc45abb1d78a009745a72b9b9bf492\": rpc error: code = NotFound desc = could not find container \"d43a2eaad6ec6c57703a6f096ce5a496f8cc45abb1d78a009745a72b9b9bf492\": container with ID starting with d43a2eaad6ec6c57703a6f096ce5a496f8cc45abb1d78a009745a72b9b9bf492 not found: ID does not exist" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.536557 4875 scope.go:117] "RemoveContainer" containerID="2e1e37ff34e8016d35de4328475cea8cef96f21dc241d6ab0dfa8c068c38059d" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.536777 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2e1e37ff34e8016d35de4328475cea8cef96f21dc241d6ab0dfa8c068c38059d"} err="failed to get container status \"2e1e37ff34e8016d35de4328475cea8cef96f21dc241d6ab0dfa8c068c38059d\": rpc error: code = NotFound desc = could not find container 
\"2e1e37ff34e8016d35de4328475cea8cef96f21dc241d6ab0dfa8c068c38059d\": container with ID starting with 2e1e37ff34e8016d35de4328475cea8cef96f21dc241d6ab0dfa8c068c38059d not found: ID does not exist" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.536799 4875 scope.go:117] "RemoveContainer" containerID="2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.537067 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14"} err="failed to get container status \"2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14\": rpc error: code = NotFound desc = could not find container \"2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14\": container with ID starting with 2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14 not found: ID does not exist" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.537107 4875 scope.go:117] "RemoveContainer" containerID="2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.537314 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a"} err="failed to get container status \"2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a\": rpc error: code = NotFound desc = could not find container \"2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a\": container with ID starting with 2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a not found: ID does not exist" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.537338 4875 scope.go:117] "RemoveContainer" containerID="7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.537613 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84"} err="failed to get container status \"7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84\": rpc error: code = NotFound desc = could not find container \"7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84\": container with ID starting with 7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84 not found: ID does not exist" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.537635 4875 scope.go:117] "RemoveContainer" containerID="a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.537895 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2"} err="failed to get container status \"a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2\": rpc error: code = NotFound desc = could not find container \"a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2\": container with ID starting with a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2 not found: ID does not exist" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.537924 4875 scope.go:117] "RemoveContainer" containerID="9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.538106 4875 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e"} err="failed to get container status \"9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e\": rpc error: code = NotFound desc = could not find container \"9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e\": container with ID starting with 9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e not found: ID does not exist" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.538126 4875 scope.go:117] "RemoveContainer" containerID="7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.538602 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886"} err="failed to get container status \"7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886\": rpc error: code = NotFound desc = could not find container \"7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886\": container with ID starting with 7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886 not found: ID does not exist" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.538638 4875 scope.go:117] "RemoveContainer" containerID="4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.538918 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826"} err="failed to get container status \"4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826\": rpc error: code = NotFound desc = could not find container \"4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826\": container with ID starting with 4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826 not found: ID does not exist" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.538948 4875 scope.go:117] "RemoveContainer" containerID="d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.539170 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21"} err="failed to get container status \"d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\": rpc error: code = NotFound desc = could not find container \"d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\": container with ID starting with d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21 not found: ID does not exist" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.539190 4875 scope.go:117] "RemoveContainer" containerID="d43a2eaad6ec6c57703a6f096ce5a496f8cc45abb1d78a009745a72b9b9bf492" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.539433 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d43a2eaad6ec6c57703a6f096ce5a496f8cc45abb1d78a009745a72b9b9bf492"} err="failed to get container status \"d43a2eaad6ec6c57703a6f096ce5a496f8cc45abb1d78a009745a72b9b9bf492\": rpc error: code = NotFound desc = could not find container \"d43a2eaad6ec6c57703a6f096ce5a496f8cc45abb1d78a009745a72b9b9bf492\": container with ID starting with 
d43a2eaad6ec6c57703a6f096ce5a496f8cc45abb1d78a009745a72b9b9bf492 not found: ID does not exist" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.539459 4875 scope.go:117] "RemoveContainer" containerID="2e1e37ff34e8016d35de4328475cea8cef96f21dc241d6ab0dfa8c068c38059d" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.541133 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2e1e37ff34e8016d35de4328475cea8cef96f21dc241d6ab0dfa8c068c38059d"} err="failed to get container status \"2e1e37ff34e8016d35de4328475cea8cef96f21dc241d6ab0dfa8c068c38059d\": rpc error: code = NotFound desc = could not find container \"2e1e37ff34e8016d35de4328475cea8cef96f21dc241d6ab0dfa8c068c38059d\": container with ID starting with 2e1e37ff34e8016d35de4328475cea8cef96f21dc241d6ab0dfa8c068c38059d not found: ID does not exist" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.541157 4875 scope.go:117] "RemoveContainer" containerID="2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.541383 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14"} err="failed to get container status \"2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14\": rpc error: code = NotFound desc = could not find container \"2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14\": container with ID starting with 2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14 not found: ID does not exist" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.541436 4875 scope.go:117] "RemoveContainer" containerID="2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.541717 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a"} err="failed to get container status \"2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a\": rpc error: code = NotFound desc = could not find container \"2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a\": container with ID starting with 2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a not found: ID does not exist" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.541737 4875 scope.go:117] "RemoveContainer" containerID="7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.541939 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84"} err="failed to get container status \"7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84\": rpc error: code = NotFound desc = could not find container \"7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84\": container with ID starting with 7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84 not found: ID does not exist" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.541963 4875 scope.go:117] "RemoveContainer" containerID="a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.542181 4875 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2"} err="failed to get container status \"a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2\": rpc error: code = NotFound desc = could not find container \"a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2\": container with ID starting with a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2 not found: ID does not exist" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.542198 4875 scope.go:117] "RemoveContainer" containerID="9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.542371 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e"} err="failed to get container status \"9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e\": rpc error: code = NotFound desc = could not find container \"9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e\": container with ID starting with 9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e not found: ID does not exist" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.542390 4875 scope.go:117] "RemoveContainer" containerID="7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.542611 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886"} err="failed to get container status \"7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886\": rpc error: code = NotFound desc = could not find container \"7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886\": container with ID starting with 7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886 not found: ID does not exist" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.542632 4875 scope.go:117] "RemoveContainer" containerID="4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.542829 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826"} err="failed to get container status \"4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826\": rpc error: code = NotFound desc = could not find container \"4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826\": container with ID starting with 4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826 not found: ID does not exist" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.542846 4875 scope.go:117] "RemoveContainer" containerID="d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.543037 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21"} err="failed to get container status \"d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\": rpc error: code = NotFound desc = could not find container \"d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\": container with ID starting with d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21 not found: ID does not exist" Nov 
26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.543054 4875 scope.go:117] "RemoveContainer" containerID="d43a2eaad6ec6c57703a6f096ce5a496f8cc45abb1d78a009745a72b9b9bf492" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.543268 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d43a2eaad6ec6c57703a6f096ce5a496f8cc45abb1d78a009745a72b9b9bf492"} err="failed to get container status \"d43a2eaad6ec6c57703a6f096ce5a496f8cc45abb1d78a009745a72b9b9bf492\": rpc error: code = NotFound desc = could not find container \"d43a2eaad6ec6c57703a6f096ce5a496f8cc45abb1d78a009745a72b9b9bf492\": container with ID starting with d43a2eaad6ec6c57703a6f096ce5a496f8cc45abb1d78a009745a72b9b9bf492 not found: ID does not exist" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.543286 4875 scope.go:117] "RemoveContainer" containerID="2e1e37ff34e8016d35de4328475cea8cef96f21dc241d6ab0dfa8c068c38059d" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.543553 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2e1e37ff34e8016d35de4328475cea8cef96f21dc241d6ab0dfa8c068c38059d"} err="failed to get container status \"2e1e37ff34e8016d35de4328475cea8cef96f21dc241d6ab0dfa8c068c38059d\": rpc error: code = NotFound desc = could not find container \"2e1e37ff34e8016d35de4328475cea8cef96f21dc241d6ab0dfa8c068c38059d\": container with ID starting with 2e1e37ff34e8016d35de4328475cea8cef96f21dc241d6ab0dfa8c068c38059d not found: ID does not exist" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.543589 4875 scope.go:117] "RemoveContainer" containerID="2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.543830 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14"} err="failed to get container status \"2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14\": rpc error: code = NotFound desc = could not find container \"2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14\": container with ID starting with 2b56027e37ad895e930ec4338ee97eb7f9ef9de58228b72c68d1b92f45dbdb14 not found: ID does not exist" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.543849 4875 scope.go:117] "RemoveContainer" containerID="2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.544054 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a"} err="failed to get container status \"2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a\": rpc error: code = NotFound desc = could not find container \"2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a\": container with ID starting with 2d08169a9f9aced64cc63aba5103ff1c1163b59299cb5df5ac4dab9b2a1ec51a not found: ID does not exist" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.544101 4875 scope.go:117] "RemoveContainer" containerID="7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.544362 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84"} err="failed to get container status 
\"7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84\": rpc error: code = NotFound desc = could not find container \"7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84\": container with ID starting with 7b8eb109a041ecf38951e5eeba1eb3e1d0514802f9afe0f765cf104c060cee84 not found: ID does not exist" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.544399 4875 scope.go:117] "RemoveContainer" containerID="a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.544684 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2"} err="failed to get container status \"a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2\": rpc error: code = NotFound desc = could not find container \"a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2\": container with ID starting with a390763f3fbb1870d2e5fa590f5396c78df6c5610185e53256b823a5fe99e1e2 not found: ID does not exist" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.544754 4875 scope.go:117] "RemoveContainer" containerID="9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.544973 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e"} err="failed to get container status \"9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e\": rpc error: code = NotFound desc = could not find container \"9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e\": container with ID starting with 9a488916e78343cd90dc6fc0351dccfbcf463767ce81d7df5bbc9e149493be2e not found: ID does not exist" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.544993 4875 scope.go:117] "RemoveContainer" containerID="7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.545265 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886"} err="failed to get container status \"7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886\": rpc error: code = NotFound desc = could not find container \"7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886\": container with ID starting with 7b3e011cb8a4e5ddae22ad6e012b78987d77e7881e361e8d61e1192f6cfa7886 not found: ID does not exist" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.545314 4875 scope.go:117] "RemoveContainer" containerID="4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.545580 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826"} err="failed to get container status \"4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826\": rpc error: code = NotFound desc = could not find container \"4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826\": container with ID starting with 4d1b09ccc71656f0b6d1bfb627405f6e6d81ba914871a174d101cb50d58dd826 not found: ID does not exist" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.545627 4875 scope.go:117] "RemoveContainer" 
containerID="d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.545857 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21"} err="failed to get container status \"d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\": rpc error: code = NotFound desc = could not find container \"d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21\": container with ID starting with d2474297cb6e09d1cb9c0cd0547a8222676017908549e384a0cce6cd11a6ee21 not found: ID does not exist" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.545883 4875 scope.go:117] "RemoveContainer" containerID="d43a2eaad6ec6c57703a6f096ce5a496f8cc45abb1d78a009745a72b9b9bf492" Nov 26 00:18:45 crc kubenswrapper[4875]: I1126 00:18:45.546172 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"d43a2eaad6ec6c57703a6f096ce5a496f8cc45abb1d78a009745a72b9b9bf492"} err="failed to get container status \"d43a2eaad6ec6c57703a6f096ce5a496f8cc45abb1d78a009745a72b9b9bf492\": rpc error: code = NotFound desc = could not find container \"d43a2eaad6ec6c57703a6f096ce5a496f8cc45abb1d78a009745a72b9b9bf492\": container with ID starting with d43a2eaad6ec6c57703a6f096ce5a496f8cc45abb1d78a009745a72b9b9bf492 not found: ID does not exist" Nov 26 00:18:46 crc kubenswrapper[4875]: I1126 00:18:46.336380 4875 generic.go:334] "Generic (PLEG): container finished" podID="8d2c9dcf-64cb-4d65-b161-9755122e490f" containerID="27a396ecbeff1be0adb5f2be416d204006273edaecd7ac5ab7f3f5f9f2dfc550" exitCode=0 Nov 26 00:18:46 crc kubenswrapper[4875]: I1126 00:18:46.336478 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" event={"ID":"8d2c9dcf-64cb-4d65-b161-9755122e490f","Type":"ContainerDied","Data":"27a396ecbeff1be0adb5f2be416d204006273edaecd7ac5ab7f3f5f9f2dfc550"} Nov 26 00:18:47 crc kubenswrapper[4875]: I1126 00:18:47.344705 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" event={"ID":"8d2c9dcf-64cb-4d65-b161-9755122e490f","Type":"ContainerStarted","Data":"0dd654d95af97c84cffb76d748689ee01ec644ebb1034798641a6e1240e9bbda"} Nov 26 00:18:47 crc kubenswrapper[4875]: I1126 00:18:47.345077 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" event={"ID":"8d2c9dcf-64cb-4d65-b161-9755122e490f","Type":"ContainerStarted","Data":"2800b62423dde263323c90f6936f100b3491cb2415aff1aac813b332d77fdcb4"} Nov 26 00:18:47 crc kubenswrapper[4875]: I1126 00:18:47.345092 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" event={"ID":"8d2c9dcf-64cb-4d65-b161-9755122e490f","Type":"ContainerStarted","Data":"800af58aa6eee9515e23dc2a4e9525d0494d83588e417ea2b79c710461ab67ac"} Nov 26 00:18:47 crc kubenswrapper[4875]: I1126 00:18:47.345105 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" event={"ID":"8d2c9dcf-64cb-4d65-b161-9755122e490f","Type":"ContainerStarted","Data":"cde948fa0206186e0dd6301533dc4aa9cb3377b7749bdeb69fd22f4555069825"} Nov 26 00:18:47 crc kubenswrapper[4875]: I1126 00:18:47.345117 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" 
event={"ID":"8d2c9dcf-64cb-4d65-b161-9755122e490f","Type":"ContainerStarted","Data":"d0ccdf0edeafc6a1c46fd8e60f8f83e903d63abd96850a82dbbdf12c8b94c0af"} Nov 26 00:18:47 crc kubenswrapper[4875]: I1126 00:18:47.345128 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" event={"ID":"8d2c9dcf-64cb-4d65-b161-9755122e490f","Type":"ContainerStarted","Data":"b62aacad0e2c09f5ee0adf288c80a6ddf79b5e960f48e05ee2084c393793c583"} Nov 26 00:18:49 crc kubenswrapper[4875]: I1126 00:18:49.360835 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" event={"ID":"8d2c9dcf-64cb-4d65-b161-9755122e490f","Type":"ContainerStarted","Data":"d81ac0c2367c432296df9e8b2bcd8a312dac4842693a661a9af19078c9e7cdd8"} Nov 26 00:18:52 crc kubenswrapper[4875]: I1126 00:18:52.378187 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" event={"ID":"8d2c9dcf-64cb-4d65-b161-9755122e490f","Type":"ContainerStarted","Data":"3f891d48bd63a280d91ed1f96c003642a90f414678c6fb2cd545a29f55635211"} Nov 26 00:18:52 crc kubenswrapper[4875]: I1126 00:18:52.378945 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:52 crc kubenswrapper[4875]: I1126 00:18:52.379053 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:52 crc kubenswrapper[4875]: I1126 00:18:52.379740 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:52 crc kubenswrapper[4875]: I1126 00:18:52.411282 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:52 crc kubenswrapper[4875]: I1126 00:18:52.424204 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:18:52 crc kubenswrapper[4875]: I1126 00:18:52.445839 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" podStartSLOduration=8.445825221 podStartE2EDuration="8.445825221s" podCreationTimestamp="2025-11-26 00:18:44 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:18:52.417489092 +0000 UTC m=+705.607464787" watchObservedRunningTime="2025-11-26 00:18:52.445825221 +0000 UTC m=+705.635800916" Nov 26 00:18:56 crc kubenswrapper[4875]: I1126 00:18:56.529049 4875 scope.go:117] "RemoveContainer" containerID="72b992d4d593659316136ff4983256fa4cb0d025d16018153f4829f1003548ac" Nov 26 00:18:56 crc kubenswrapper[4875]: E1126 00:18:56.531745 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"kube-multus\" with CrashLoopBackOff: \"back-off 20s restarting failed container=kube-multus pod=multus-8wvqp_openshift-multus(9fd65949-6a8a-4828-baa4-1c88c71d5ed2)\"" pod="openshift-multus/multus-8wvqp" podUID="9fd65949-6a8a-4828-baa4-1c88c71d5ed2" Nov 26 00:19:07 crc kubenswrapper[4875]: I1126 00:19:07.512659 4875 patch_prober.go:28] interesting pod/machine-config-daemon-9mztb container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 
00:19:07 crc kubenswrapper[4875]: I1126 00:19:07.513718 4875 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 00:19:07 crc kubenswrapper[4875]: I1126 00:19:07.752825 4875 scope.go:117] "RemoveContainer" containerID="8403934b57b62597733ce8e578eef53784b1e5a39eba7e4fb8765870a3690424" Nov 26 00:19:08 crc kubenswrapper[4875]: I1126 00:19:08.471718 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8wvqp_9fd65949-6a8a-4828-baa4-1c88c71d5ed2/kube-multus/2.log" Nov 26 00:19:08 crc kubenswrapper[4875]: I1126 00:19:08.528461 4875 scope.go:117] "RemoveContainer" containerID="72b992d4d593659316136ff4983256fa4cb0d025d16018153f4829f1003548ac" Nov 26 00:19:09 crc kubenswrapper[4875]: I1126 00:19:09.480293 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-multus_multus-8wvqp_9fd65949-6a8a-4828-baa4-1c88c71d5ed2/kube-multus/2.log" Nov 26 00:19:09 crc kubenswrapper[4875]: I1126 00:19:09.480647 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-multus/multus-8wvqp" event={"ID":"9fd65949-6a8a-4828-baa4-1c88c71d5ed2","Type":"ContainerStarted","Data":"4e5fe1999c46165a599a05fbe2c19ac7215de21bfa876225bf4fba22d9121c8d"} Nov 26 00:19:15 crc kubenswrapper[4875]: I1126 00:19:15.249900 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-ovn-kubernetes/ovnkube-node-l6jtc" Nov 26 00:19:36 crc kubenswrapper[4875]: I1126 00:19:36.573687 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-5mx4v"] Nov 26 00:19:36 crc kubenswrapper[4875]: I1126 00:19:36.574833 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-controller-manager/controller-manager-879f6c89f-5mx4v" podUID="3a569b99-f211-4204-a526-314e066143c2" containerName="controller-manager" containerID="cri-o://9c6fac72b782acfe6337b98f2323fac052576983ca2fb54a59419f80702c3d1d" gracePeriod=30 Nov 26 00:19:36 crc kubenswrapper[4875]: I1126 00:19:36.671648 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-rlkpg"] Nov 26 00:19:36 crc kubenswrapper[4875]: I1126 00:19:36.672176 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rlkpg" podUID="42c80491-a1f7-4f1f-b2a0-db682fbc490e" containerName="route-controller-manager" containerID="cri-o://f6a814d81327c73ae505c66b9ed8fc9fd159d7d798f172847c1c6f962620aa89" gracePeriod=30 Nov 26 00:19:36 crc kubenswrapper[4875]: I1126 00:19:36.929559 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-5mx4v" Nov 26 00:19:36 crc kubenswrapper[4875]: I1126 00:19:36.986804 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3a569b99-f211-4204-a526-314e066143c2-config\") pod \"3a569b99-f211-4204-a526-314e066143c2\" (UID: \"3a569b99-f211-4204-a526-314e066143c2\") " Nov 26 00:19:36 crc kubenswrapper[4875]: I1126 00:19:36.986851 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3a569b99-f211-4204-a526-314e066143c2-client-ca\") pod \"3a569b99-f211-4204-a526-314e066143c2\" (UID: \"3a569b99-f211-4204-a526-314e066143c2\") " Nov 26 00:19:36 crc kubenswrapper[4875]: I1126 00:19:36.986880 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3a569b99-f211-4204-a526-314e066143c2-serving-cert\") pod \"3a569b99-f211-4204-a526-314e066143c2\" (UID: \"3a569b99-f211-4204-a526-314e066143c2\") " Nov 26 00:19:36 crc kubenswrapper[4875]: I1126 00:19:36.986921 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6nmnh\" (UniqueName: \"kubernetes.io/projected/3a569b99-f211-4204-a526-314e066143c2-kube-api-access-6nmnh\") pod \"3a569b99-f211-4204-a526-314e066143c2\" (UID: \"3a569b99-f211-4204-a526-314e066143c2\") " Nov 26 00:19:36 crc kubenswrapper[4875]: I1126 00:19:36.986976 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/3a569b99-f211-4204-a526-314e066143c2-proxy-ca-bundles\") pod \"3a569b99-f211-4204-a526-314e066143c2\" (UID: \"3a569b99-f211-4204-a526-314e066143c2\") " Nov 26 00:19:36 crc kubenswrapper[4875]: I1126 00:19:36.988255 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3a569b99-f211-4204-a526-314e066143c2-config" (OuterVolumeSpecName: "config") pod "3a569b99-f211-4204-a526-314e066143c2" (UID: "3a569b99-f211-4204-a526-314e066143c2"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:19:36 crc kubenswrapper[4875]: I1126 00:19:36.988848 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3a569b99-f211-4204-a526-314e066143c2-client-ca" (OuterVolumeSpecName: "client-ca") pod "3a569b99-f211-4204-a526-314e066143c2" (UID: "3a569b99-f211-4204-a526-314e066143c2"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:19:36 crc kubenswrapper[4875]: I1126 00:19:36.988962 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/3a569b99-f211-4204-a526-314e066143c2-proxy-ca-bundles" (OuterVolumeSpecName: "proxy-ca-bundles") pod "3a569b99-f211-4204-a526-314e066143c2" (UID: "3a569b99-f211-4204-a526-314e066143c2"). InnerVolumeSpecName "proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:19:36 crc kubenswrapper[4875]: I1126 00:19:36.993980 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3a569b99-f211-4204-a526-314e066143c2-kube-api-access-6nmnh" (OuterVolumeSpecName: "kube-api-access-6nmnh") pod "3a569b99-f211-4204-a526-314e066143c2" (UID: "3a569b99-f211-4204-a526-314e066143c2"). InnerVolumeSpecName "kube-api-access-6nmnh". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:19:36 crc kubenswrapper[4875]: I1126 00:19:36.995839 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/3a569b99-f211-4204-a526-314e066143c2-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "3a569b99-f211-4204-a526-314e066143c2" (UID: "3a569b99-f211-4204-a526-314e066143c2"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.015821 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rlkpg" Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.087697 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-8f5px\" (UniqueName: \"kubernetes.io/projected/42c80491-a1f7-4f1f-b2a0-db682fbc490e-kube-api-access-8f5px\") pod \"42c80491-a1f7-4f1f-b2a0-db682fbc490e\" (UID: \"42c80491-a1f7-4f1f-b2a0-db682fbc490e\") " Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.087753 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/42c80491-a1f7-4f1f-b2a0-db682fbc490e-config\") pod \"42c80491-a1f7-4f1f-b2a0-db682fbc490e\" (UID: \"42c80491-a1f7-4f1f-b2a0-db682fbc490e\") " Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.087855 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/42c80491-a1f7-4f1f-b2a0-db682fbc490e-client-ca\") pod \"42c80491-a1f7-4f1f-b2a0-db682fbc490e\" (UID: \"42c80491-a1f7-4f1f-b2a0-db682fbc490e\") " Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.087892 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/42c80491-a1f7-4f1f-b2a0-db682fbc490e-serving-cert\") pod \"42c80491-a1f7-4f1f-b2a0-db682fbc490e\" (UID: \"42c80491-a1f7-4f1f-b2a0-db682fbc490e\") " Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.088083 4875 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/3a569b99-f211-4204-a526-314e066143c2-config\") on node \"crc\" DevicePath \"\"" Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.088094 4875 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/3a569b99-f211-4204-a526-314e066143c2-client-ca\") on node \"crc\" DevicePath \"\"" Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.088102 4875 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/3a569b99-f211-4204-a526-314e066143c2-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.088110 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6nmnh\" (UniqueName: \"kubernetes.io/projected/3a569b99-f211-4204-a526-314e066143c2-kube-api-access-6nmnh\") on node \"crc\" DevicePath \"\"" Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.088120 4875 reconciler_common.go:293] "Volume detached for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/3a569b99-f211-4204-a526-314e066143c2-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.088547 4875 operation_generator.go:803] UnmountVolume.TearDown 
succeeded for volume "kubernetes.io/configmap/42c80491-a1f7-4f1f-b2a0-db682fbc490e-client-ca" (OuterVolumeSpecName: "client-ca") pod "42c80491-a1f7-4f1f-b2a0-db682fbc490e" (UID: "42c80491-a1f7-4f1f-b2a0-db682fbc490e"). InnerVolumeSpecName "client-ca". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.088583 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/42c80491-a1f7-4f1f-b2a0-db682fbc490e-config" (OuterVolumeSpecName: "config") pod "42c80491-a1f7-4f1f-b2a0-db682fbc490e" (UID: "42c80491-a1f7-4f1f-b2a0-db682fbc490e"). InnerVolumeSpecName "config". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.090750 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/42c80491-a1f7-4f1f-b2a0-db682fbc490e-serving-cert" (OuterVolumeSpecName: "serving-cert") pod "42c80491-a1f7-4f1f-b2a0-db682fbc490e" (UID: "42c80491-a1f7-4f1f-b2a0-db682fbc490e"). InnerVolumeSpecName "serving-cert". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.090827 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/42c80491-a1f7-4f1f-b2a0-db682fbc490e-kube-api-access-8f5px" (OuterVolumeSpecName: "kube-api-access-8f5px") pod "42c80491-a1f7-4f1f-b2a0-db682fbc490e" (UID: "42c80491-a1f7-4f1f-b2a0-db682fbc490e"). InnerVolumeSpecName "kube-api-access-8f5px". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.188979 4875 reconciler_common.go:293] "Volume detached for volume \"config\" (UniqueName: \"kubernetes.io/configmap/42c80491-a1f7-4f1f-b2a0-db682fbc490e-config\") on node \"crc\" DevicePath \"\"" Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.189024 4875 reconciler_common.go:293] "Volume detached for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/42c80491-a1f7-4f1f-b2a0-db682fbc490e-client-ca\") on node \"crc\" DevicePath \"\"" Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.189035 4875 reconciler_common.go:293] "Volume detached for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/42c80491-a1f7-4f1f-b2a0-db682fbc490e-serving-cert\") on node \"crc\" DevicePath \"\"" Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.189044 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-8f5px\" (UniqueName: \"kubernetes.io/projected/42c80491-a1f7-4f1f-b2a0-db682fbc490e-kube-api-access-8f5px\") on node \"crc\" DevicePath \"\"" Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.512094 4875 patch_prober.go:28] interesting pod/machine-config-daemon-9mztb container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.512280 4875 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.580632 4875 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-route-controller-manager/route-controller-manager-594d96ffc7-87cm7"] Nov 26 00:19:37 crc kubenswrapper[4875]: E1126 00:19:37.580830 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="42c80491-a1f7-4f1f-b2a0-db682fbc490e" containerName="route-controller-manager" Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.580843 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="42c80491-a1f7-4f1f-b2a0-db682fbc490e" containerName="route-controller-manager" Nov 26 00:19:37 crc kubenswrapper[4875]: E1126 00:19:37.580860 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3a569b99-f211-4204-a526-314e066143c2" containerName="controller-manager" Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.580867 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="3a569b99-f211-4204-a526-314e066143c2" containerName="controller-manager" Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.580959 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="42c80491-a1f7-4f1f-b2a0-db682fbc490e" containerName="route-controller-manager" Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.580977 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="3a569b99-f211-4204-a526-314e066143c2" containerName="controller-manager" Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.581321 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-594d96ffc7-87cm7" Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.592203 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-594d96ffc7-87cm7"] Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.621226 4875 generic.go:334] "Generic (PLEG): container finished" podID="3a569b99-f211-4204-a526-314e066143c2" containerID="9c6fac72b782acfe6337b98f2323fac052576983ca2fb54a59419f80702c3d1d" exitCode=0 Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.621274 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-879f6c89f-5mx4v" Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.621284 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-5mx4v" event={"ID":"3a569b99-f211-4204-a526-314e066143c2","Type":"ContainerDied","Data":"9c6fac72b782acfe6337b98f2323fac052576983ca2fb54a59419f80702c3d1d"} Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.621317 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-879f6c89f-5mx4v" event={"ID":"3a569b99-f211-4204-a526-314e066143c2","Type":"ContainerDied","Data":"c02e6919499bc192766025724d7a4be9e79853133112784067578dcbabb000ca"} Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.621335 4875 scope.go:117] "RemoveContainer" containerID="9c6fac72b782acfe6337b98f2323fac052576983ca2fb54a59419f80702c3d1d" Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.623731 4875 generic.go:334] "Generic (PLEG): container finished" podID="42c80491-a1f7-4f1f-b2a0-db682fbc490e" containerID="f6a814d81327c73ae505c66b9ed8fc9fd159d7d798f172847c1c6f962620aa89" exitCode=0 Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.623737 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rlkpg" Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.623757 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rlkpg" event={"ID":"42c80491-a1f7-4f1f-b2a0-db682fbc490e","Type":"ContainerDied","Data":"f6a814d81327c73ae505c66b9ed8fc9fd159d7d798f172847c1c6f962620aa89"} Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.623884 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-6576b87f9c-rlkpg" event={"ID":"42c80491-a1f7-4f1f-b2a0-db682fbc490e","Type":"ContainerDied","Data":"9f15e095da8fc9188d7c6ca904d3791cf079efcb5d2e00c579b980e05aa20b7e"} Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.633857 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-5mx4v"] Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.636465 4875 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-controller-manager/controller-manager-879f6c89f-5mx4v"] Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.638698 4875 scope.go:117] "RemoveContainer" containerID="9c6fac72b782acfe6337b98f2323fac052576983ca2fb54a59419f80702c3d1d" Nov 26 00:19:37 crc kubenswrapper[4875]: E1126 00:19:37.639059 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"9c6fac72b782acfe6337b98f2323fac052576983ca2fb54a59419f80702c3d1d\": container with ID starting with 9c6fac72b782acfe6337b98f2323fac052576983ca2fb54a59419f80702c3d1d not found: ID does not exist" containerID="9c6fac72b782acfe6337b98f2323fac052576983ca2fb54a59419f80702c3d1d" Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.639090 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"9c6fac72b782acfe6337b98f2323fac052576983ca2fb54a59419f80702c3d1d"} err="failed to get container status \"9c6fac72b782acfe6337b98f2323fac052576983ca2fb54a59419f80702c3d1d\": rpc error: code = NotFound desc = could not find container \"9c6fac72b782acfe6337b98f2323fac052576983ca2fb54a59419f80702c3d1d\": container with ID starting with 9c6fac72b782acfe6337b98f2323fac052576983ca2fb54a59419f80702c3d1d not found: ID does not exist" Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.639111 4875 scope.go:117] "RemoveContainer" containerID="f6a814d81327c73ae505c66b9ed8fc9fd159d7d798f172847c1c6f962620aa89" Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.645382 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-rlkpg"] Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.648272 4875 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-6576b87f9c-rlkpg"] Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.652289 4875 scope.go:117] "RemoveContainer" containerID="f6a814d81327c73ae505c66b9ed8fc9fd159d7d798f172847c1c6f962620aa89" Nov 26 00:19:37 crc kubenswrapper[4875]: E1126 00:19:37.652755 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f6a814d81327c73ae505c66b9ed8fc9fd159d7d798f172847c1c6f962620aa89\": container with ID starting with f6a814d81327c73ae505c66b9ed8fc9fd159d7d798f172847c1c6f962620aa89 not found: ID 
does not exist" containerID="f6a814d81327c73ae505c66b9ed8fc9fd159d7d798f172847c1c6f962620aa89" Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.652795 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f6a814d81327c73ae505c66b9ed8fc9fd159d7d798f172847c1c6f962620aa89"} err="failed to get container status \"f6a814d81327c73ae505c66b9ed8fc9fd159d7d798f172847c1c6f962620aa89\": rpc error: code = NotFound desc = could not find container \"f6a814d81327c73ae505c66b9ed8fc9fd159d7d798f172847c1c6f962620aa89\": container with ID starting with f6a814d81327c73ae505c66b9ed8fc9fd159d7d798f172847c1c6f962620aa89 not found: ID does not exist" Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.694275 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/10cc1cd5-d62f-410e-a0bf-21198e58cea9-config\") pod \"route-controller-manager-594d96ffc7-87cm7\" (UID: \"10cc1cd5-d62f-410e-a0bf-21198e58cea9\") " pod="openshift-route-controller-manager/route-controller-manager-594d96ffc7-87cm7" Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.694347 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/10cc1cd5-d62f-410e-a0bf-21198e58cea9-serving-cert\") pod \"route-controller-manager-594d96ffc7-87cm7\" (UID: \"10cc1cd5-d62f-410e-a0bf-21198e58cea9\") " pod="openshift-route-controller-manager/route-controller-manager-594d96ffc7-87cm7" Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.694443 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bwkd7\" (UniqueName: \"kubernetes.io/projected/10cc1cd5-d62f-410e-a0bf-21198e58cea9-kube-api-access-bwkd7\") pod \"route-controller-manager-594d96ffc7-87cm7\" (UID: \"10cc1cd5-d62f-410e-a0bf-21198e58cea9\") " pod="openshift-route-controller-manager/route-controller-manager-594d96ffc7-87cm7" Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.694471 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/10cc1cd5-d62f-410e-a0bf-21198e58cea9-client-ca\") pod \"route-controller-manager-594d96ffc7-87cm7\" (UID: \"10cc1cd5-d62f-410e-a0bf-21198e58cea9\") " pod="openshift-route-controller-manager/route-controller-manager-594d96ffc7-87cm7" Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.795382 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/10cc1cd5-d62f-410e-a0bf-21198e58cea9-client-ca\") pod \"route-controller-manager-594d96ffc7-87cm7\" (UID: \"10cc1cd5-d62f-410e-a0bf-21198e58cea9\") " pod="openshift-route-controller-manager/route-controller-manager-594d96ffc7-87cm7" Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.795525 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/10cc1cd5-d62f-410e-a0bf-21198e58cea9-config\") pod \"route-controller-manager-594d96ffc7-87cm7\" (UID: \"10cc1cd5-d62f-410e-a0bf-21198e58cea9\") " pod="openshift-route-controller-manager/route-controller-manager-594d96ffc7-87cm7" Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.795548 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: 
\"kubernetes.io/secret/10cc1cd5-d62f-410e-a0bf-21198e58cea9-serving-cert\") pod \"route-controller-manager-594d96ffc7-87cm7\" (UID: \"10cc1cd5-d62f-410e-a0bf-21198e58cea9\") " pod="openshift-route-controller-manager/route-controller-manager-594d96ffc7-87cm7" Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.795603 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bwkd7\" (UniqueName: \"kubernetes.io/projected/10cc1cd5-d62f-410e-a0bf-21198e58cea9-kube-api-access-bwkd7\") pod \"route-controller-manager-594d96ffc7-87cm7\" (UID: \"10cc1cd5-d62f-410e-a0bf-21198e58cea9\") " pod="openshift-route-controller-manager/route-controller-manager-594d96ffc7-87cm7" Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.796970 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/10cc1cd5-d62f-410e-a0bf-21198e58cea9-client-ca\") pod \"route-controller-manager-594d96ffc7-87cm7\" (UID: \"10cc1cd5-d62f-410e-a0bf-21198e58cea9\") " pod="openshift-route-controller-manager/route-controller-manager-594d96ffc7-87cm7" Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.797135 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/10cc1cd5-d62f-410e-a0bf-21198e58cea9-config\") pod \"route-controller-manager-594d96ffc7-87cm7\" (UID: \"10cc1cd5-d62f-410e-a0bf-21198e58cea9\") " pod="openshift-route-controller-manager/route-controller-manager-594d96ffc7-87cm7" Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.800575 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/10cc1cd5-d62f-410e-a0bf-21198e58cea9-serving-cert\") pod \"route-controller-manager-594d96ffc7-87cm7\" (UID: \"10cc1cd5-d62f-410e-a0bf-21198e58cea9\") " pod="openshift-route-controller-manager/route-controller-manager-594d96ffc7-87cm7" Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.814082 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bwkd7\" (UniqueName: \"kubernetes.io/projected/10cc1cd5-d62f-410e-a0bf-21198e58cea9-kube-api-access-bwkd7\") pod \"route-controller-manager-594d96ffc7-87cm7\" (UID: \"10cc1cd5-d62f-410e-a0bf-21198e58cea9\") " pod="openshift-route-controller-manager/route-controller-manager-594d96ffc7-87cm7" Nov 26 00:19:37 crc kubenswrapper[4875]: I1126 00:19:37.897477 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-route-controller-manager/route-controller-manager-594d96ffc7-87cm7" Nov 26 00:19:38 crc kubenswrapper[4875]: I1126 00:19:38.073511 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-route-controller-manager/route-controller-manager-594d96ffc7-87cm7"] Nov 26 00:19:38 crc kubenswrapper[4875]: W1126 00:19:38.087441 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod10cc1cd5_d62f_410e_a0bf_21198e58cea9.slice/crio-18155b91a124c95475430d3cca1466d4dbdade63045df042ce4fb27f0ccdd51f WatchSource:0}: Error finding container 18155b91a124c95475430d3cca1466d4dbdade63045df042ce4fb27f0ccdd51f: Status 404 returned error can't find the container with id 18155b91a124c95475430d3cca1466d4dbdade63045df042ce4fb27f0ccdd51f Nov 26 00:19:38 crc kubenswrapper[4875]: I1126 00:19:38.310722 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-controller-manager/controller-manager-5dbd5cb94c-68f8c"] Nov 26 00:19:38 crc kubenswrapper[4875]: I1126 00:19:38.311608 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-controller-manager/controller-manager-5dbd5cb94c-68f8c" Nov 26 00:19:38 crc kubenswrapper[4875]: I1126 00:19:38.314469 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 26 00:19:38 crc kubenswrapper[4875]: I1126 00:19:38.314715 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 26 00:19:38 crc kubenswrapper[4875]: I1126 00:19:38.314856 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 26 00:19:38 crc kubenswrapper[4875]: I1126 00:19:38.314908 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 26 00:19:38 crc kubenswrapper[4875]: I1126 00:19:38.315160 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 26 00:19:38 crc kubenswrapper[4875]: I1126 00:19:38.315359 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 26 00:19:38 crc kubenswrapper[4875]: I1126 00:19:38.320888 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 26 00:19:38 crc kubenswrapper[4875]: I1126 00:19:38.328230 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5dbd5cb94c-68f8c"] Nov 26 00:19:38 crc kubenswrapper[4875]: I1126 00:19:38.403811 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ddf38655-ea65-4f79-a2f2-cc31ee3d0392-serving-cert\") pod \"controller-manager-5dbd5cb94c-68f8c\" (UID: \"ddf38655-ea65-4f79-a2f2-cc31ee3d0392\") " pod="openshift-controller-manager/controller-manager-5dbd5cb94c-68f8c" Nov 26 00:19:38 crc kubenswrapper[4875]: I1126 00:19:38.404055 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ddf38655-ea65-4f79-a2f2-cc31ee3d0392-config\") pod \"controller-manager-5dbd5cb94c-68f8c\" (UID: \"ddf38655-ea65-4f79-a2f2-cc31ee3d0392\") " 
pod="openshift-controller-manager/controller-manager-5dbd5cb94c-68f8c" Nov 26 00:19:38 crc kubenswrapper[4875]: I1126 00:19:38.404195 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gntpb\" (UniqueName: \"kubernetes.io/projected/ddf38655-ea65-4f79-a2f2-cc31ee3d0392-kube-api-access-gntpb\") pod \"controller-manager-5dbd5cb94c-68f8c\" (UID: \"ddf38655-ea65-4f79-a2f2-cc31ee3d0392\") " pod="openshift-controller-manager/controller-manager-5dbd5cb94c-68f8c" Nov 26 00:19:38 crc kubenswrapper[4875]: I1126 00:19:38.404433 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ddf38655-ea65-4f79-a2f2-cc31ee3d0392-proxy-ca-bundles\") pod \"controller-manager-5dbd5cb94c-68f8c\" (UID: \"ddf38655-ea65-4f79-a2f2-cc31ee3d0392\") " pod="openshift-controller-manager/controller-manager-5dbd5cb94c-68f8c" Nov 26 00:19:38 crc kubenswrapper[4875]: I1126 00:19:38.404581 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ddf38655-ea65-4f79-a2f2-cc31ee3d0392-client-ca\") pod \"controller-manager-5dbd5cb94c-68f8c\" (UID: \"ddf38655-ea65-4f79-a2f2-cc31ee3d0392\") " pod="openshift-controller-manager/controller-manager-5dbd5cb94c-68f8c" Nov 26 00:19:38 crc kubenswrapper[4875]: I1126 00:19:38.505392 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ddf38655-ea65-4f79-a2f2-cc31ee3d0392-serving-cert\") pod \"controller-manager-5dbd5cb94c-68f8c\" (UID: \"ddf38655-ea65-4f79-a2f2-cc31ee3d0392\") " pod="openshift-controller-manager/controller-manager-5dbd5cb94c-68f8c" Nov 26 00:19:38 crc kubenswrapper[4875]: I1126 00:19:38.505458 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ddf38655-ea65-4f79-a2f2-cc31ee3d0392-config\") pod \"controller-manager-5dbd5cb94c-68f8c\" (UID: \"ddf38655-ea65-4f79-a2f2-cc31ee3d0392\") " pod="openshift-controller-manager/controller-manager-5dbd5cb94c-68f8c" Nov 26 00:19:38 crc kubenswrapper[4875]: I1126 00:19:38.505482 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gntpb\" (UniqueName: \"kubernetes.io/projected/ddf38655-ea65-4f79-a2f2-cc31ee3d0392-kube-api-access-gntpb\") pod \"controller-manager-5dbd5cb94c-68f8c\" (UID: \"ddf38655-ea65-4f79-a2f2-cc31ee3d0392\") " pod="openshift-controller-manager/controller-manager-5dbd5cb94c-68f8c" Nov 26 00:19:38 crc kubenswrapper[4875]: I1126 00:19:38.505527 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ddf38655-ea65-4f79-a2f2-cc31ee3d0392-proxy-ca-bundles\") pod \"controller-manager-5dbd5cb94c-68f8c\" (UID: \"ddf38655-ea65-4f79-a2f2-cc31ee3d0392\") " pod="openshift-controller-manager/controller-manager-5dbd5cb94c-68f8c" Nov 26 00:19:38 crc kubenswrapper[4875]: I1126 00:19:38.505558 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ddf38655-ea65-4f79-a2f2-cc31ee3d0392-client-ca\") pod \"controller-manager-5dbd5cb94c-68f8c\" (UID: \"ddf38655-ea65-4f79-a2f2-cc31ee3d0392\") " pod="openshift-controller-manager/controller-manager-5dbd5cb94c-68f8c" Nov 26 00:19:38 crc kubenswrapper[4875]: I1126 
00:19:38.506690 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"client-ca\" (UniqueName: \"kubernetes.io/configmap/ddf38655-ea65-4f79-a2f2-cc31ee3d0392-client-ca\") pod \"controller-manager-5dbd5cb94c-68f8c\" (UID: \"ddf38655-ea65-4f79-a2f2-cc31ee3d0392\") " pod="openshift-controller-manager/controller-manager-5dbd5cb94c-68f8c" Nov 26 00:19:38 crc kubenswrapper[4875]: I1126 00:19:38.506818 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/ddf38655-ea65-4f79-a2f2-cc31ee3d0392-proxy-ca-bundles\") pod \"controller-manager-5dbd5cb94c-68f8c\" (UID: \"ddf38655-ea65-4f79-a2f2-cc31ee3d0392\") " pod="openshift-controller-manager/controller-manager-5dbd5cb94c-68f8c" Nov 26 00:19:38 crc kubenswrapper[4875]: I1126 00:19:38.506986 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config\" (UniqueName: \"kubernetes.io/configmap/ddf38655-ea65-4f79-a2f2-cc31ee3d0392-config\") pod \"controller-manager-5dbd5cb94c-68f8c\" (UID: \"ddf38655-ea65-4f79-a2f2-cc31ee3d0392\") " pod="openshift-controller-manager/controller-manager-5dbd5cb94c-68f8c" Nov 26 00:19:38 crc kubenswrapper[4875]: I1126 00:19:38.510847 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"serving-cert\" (UniqueName: \"kubernetes.io/secret/ddf38655-ea65-4f79-a2f2-cc31ee3d0392-serving-cert\") pod \"controller-manager-5dbd5cb94c-68f8c\" (UID: \"ddf38655-ea65-4f79-a2f2-cc31ee3d0392\") " pod="openshift-controller-manager/controller-manager-5dbd5cb94c-68f8c" Nov 26 00:19:38 crc kubenswrapper[4875]: I1126 00:19:38.521522 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gntpb\" (UniqueName: \"kubernetes.io/projected/ddf38655-ea65-4f79-a2f2-cc31ee3d0392-kube-api-access-gntpb\") pod \"controller-manager-5dbd5cb94c-68f8c\" (UID: \"ddf38655-ea65-4f79-a2f2-cc31ee3d0392\") " pod="openshift-controller-manager/controller-manager-5dbd5cb94c-68f8c" Nov 26 00:19:38 crc kubenswrapper[4875]: I1126 00:19:38.628219 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-controller-manager/controller-manager-5dbd5cb94c-68f8c" Nov 26 00:19:38 crc kubenswrapper[4875]: I1126 00:19:38.631314 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-594d96ffc7-87cm7" event={"ID":"10cc1cd5-d62f-410e-a0bf-21198e58cea9","Type":"ContainerStarted","Data":"a6167b271d87aaffe73734652c7966c2f9543e003a3f3facd250b1135e210833"} Nov 26 00:19:38 crc kubenswrapper[4875]: I1126 00:19:38.631361 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-route-controller-manager/route-controller-manager-594d96ffc7-87cm7" event={"ID":"10cc1cd5-d62f-410e-a0bf-21198e58cea9","Type":"ContainerStarted","Data":"18155b91a124c95475430d3cca1466d4dbdade63045df042ce4fb27f0ccdd51f"} Nov 26 00:19:38 crc kubenswrapper[4875]: I1126 00:19:38.635115 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-route-controller-manager/route-controller-manager-594d96ffc7-87cm7" Nov 26 00:19:38 crc kubenswrapper[4875]: I1126 00:19:38.645046 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-route-controller-manager/route-controller-manager-594d96ffc7-87cm7" Nov 26 00:19:38 crc kubenswrapper[4875]: I1126 00:19:38.663074 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-route-controller-manager/route-controller-manager-594d96ffc7-87cm7" podStartSLOduration=1.663057753 podStartE2EDuration="1.663057753s" podCreationTimestamp="2025-11-26 00:19:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:19:38.660147024 +0000 UTC m=+751.850122729" watchObservedRunningTime="2025-11-26 00:19:38.663057753 +0000 UTC m=+751.853033448" Nov 26 00:19:39 crc kubenswrapper[4875]: I1126 00:19:39.011184 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-controller-manager/controller-manager-5dbd5cb94c-68f8c"] Nov 26 00:19:39 crc kubenswrapper[4875]: I1126 00:19:39.535988 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3a569b99-f211-4204-a526-314e066143c2" path="/var/lib/kubelet/pods/3a569b99-f211-4204-a526-314e066143c2/volumes" Nov 26 00:19:39 crc kubenswrapper[4875]: I1126 00:19:39.536906 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="42c80491-a1f7-4f1f-b2a0-db682fbc490e" path="/var/lib/kubelet/pods/42c80491-a1f7-4f1f-b2a0-db682fbc490e/volumes" Nov 26 00:19:39 crc kubenswrapper[4875]: I1126 00:19:39.636651 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5dbd5cb94c-68f8c" event={"ID":"ddf38655-ea65-4f79-a2f2-cc31ee3d0392","Type":"ContainerStarted","Data":"565c6482f3cbae2e8b69518cd8942f16f21ba03b9ca391ee3bd16040912a9c78"} Nov 26 00:19:39 crc kubenswrapper[4875]: I1126 00:19:39.636698 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-controller-manager/controller-manager-5dbd5cb94c-68f8c" event={"ID":"ddf38655-ea65-4f79-a2f2-cc31ee3d0392","Type":"ContainerStarted","Data":"42cc5c0a1558632d2209c8c0ffedf6377a384842a728982162b6c5f8925ebc68"} Nov 26 00:19:39 crc kubenswrapper[4875]: I1126 00:19:39.636900 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-controller-manager/controller-manager-5dbd5cb94c-68f8c" Nov 26 00:19:39 crc kubenswrapper[4875]: I1126 00:19:39.642463 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" 
pod="openshift-controller-manager/controller-manager-5dbd5cb94c-68f8c" Nov 26 00:19:39 crc kubenswrapper[4875]: I1126 00:19:39.655823 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-controller-manager/controller-manager-5dbd5cb94c-68f8c" podStartSLOduration=3.655807015 podStartE2EDuration="3.655807015s" podCreationTimestamp="2025-11-26 00:19:36 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:19:39.655143137 +0000 UTC m=+752.845118832" watchObservedRunningTime="2025-11-26 00:19:39.655807015 +0000 UTC m=+752.845782710" Nov 26 00:19:44 crc kubenswrapper[4875]: I1126 00:19:44.298407 4875 dynamic_cafile_content.go:123] "Loaded a new CA Bundle and Verifier" name="client-ca-bundle::/etc/kubernetes/kubelet-ca.crt" Nov 26 00:19:53 crc kubenswrapper[4875]: I1126 00:19:53.911651 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-9p7k6"] Nov 26 00:19:53 crc kubenswrapper[4875]: I1126 00:19:53.912573 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-marketplace-9p7k6" podUID="a9184a91-8dc4-47a4-a62f-23dd90cfd3fd" containerName="registry-server" containerID="cri-o://16e7b57d222f8b3cbe8a3d561c96e1f9a98a55c67cc905fba59da0721ee20516" gracePeriod=30 Nov 26 00:19:54 crc kubenswrapper[4875]: I1126 00:19:54.710272 4875 generic.go:334] "Generic (PLEG): container finished" podID="a9184a91-8dc4-47a4-a62f-23dd90cfd3fd" containerID="16e7b57d222f8b3cbe8a3d561c96e1f9a98a55c67cc905fba59da0721ee20516" exitCode=0 Nov 26 00:19:54 crc kubenswrapper[4875]: I1126 00:19:54.710514 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9p7k6" event={"ID":"a9184a91-8dc4-47a4-a62f-23dd90cfd3fd","Type":"ContainerDied","Data":"16e7b57d222f8b3cbe8a3d561c96e1f9a98a55c67cc905fba59da0721ee20516"} Nov 26 00:19:54 crc kubenswrapper[4875]: I1126 00:19:54.841176 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9p7k6" Nov 26 00:19:54 crc kubenswrapper[4875]: I1126 00:19:54.996804 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a9184a91-8dc4-47a4-a62f-23dd90cfd3fd-catalog-content\") pod \"a9184a91-8dc4-47a4-a62f-23dd90cfd3fd\" (UID: \"a9184a91-8dc4-47a4-a62f-23dd90cfd3fd\") " Nov 26 00:19:54 crc kubenswrapper[4875]: I1126 00:19:54.996885 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a9184a91-8dc4-47a4-a62f-23dd90cfd3fd-utilities\") pod \"a9184a91-8dc4-47a4-a62f-23dd90cfd3fd\" (UID: \"a9184a91-8dc4-47a4-a62f-23dd90cfd3fd\") " Nov 26 00:19:54 crc kubenswrapper[4875]: I1126 00:19:54.996957 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vh9kg\" (UniqueName: \"kubernetes.io/projected/a9184a91-8dc4-47a4-a62f-23dd90cfd3fd-kube-api-access-vh9kg\") pod \"a9184a91-8dc4-47a4-a62f-23dd90cfd3fd\" (UID: \"a9184a91-8dc4-47a4-a62f-23dd90cfd3fd\") " Nov 26 00:19:54 crc kubenswrapper[4875]: I1126 00:19:54.997843 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a9184a91-8dc4-47a4-a62f-23dd90cfd3fd-utilities" (OuterVolumeSpecName: "utilities") pod "a9184a91-8dc4-47a4-a62f-23dd90cfd3fd" (UID: "a9184a91-8dc4-47a4-a62f-23dd90cfd3fd"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:19:55 crc kubenswrapper[4875]: I1126 00:19:55.002731 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a9184a91-8dc4-47a4-a62f-23dd90cfd3fd-kube-api-access-vh9kg" (OuterVolumeSpecName: "kube-api-access-vh9kg") pod "a9184a91-8dc4-47a4-a62f-23dd90cfd3fd" (UID: "a9184a91-8dc4-47a4-a62f-23dd90cfd3fd"). InnerVolumeSpecName "kube-api-access-vh9kg". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:19:55 crc kubenswrapper[4875]: I1126 00:19:55.016164 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a9184a91-8dc4-47a4-a62f-23dd90cfd3fd-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a9184a91-8dc4-47a4-a62f-23dd90cfd3fd" (UID: "a9184a91-8dc4-47a4-a62f-23dd90cfd3fd"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:19:55 crc kubenswrapper[4875]: I1126 00:19:55.098857 4875 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a9184a91-8dc4-47a4-a62f-23dd90cfd3fd-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 00:19:55 crc kubenswrapper[4875]: I1126 00:19:55.098889 4875 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a9184a91-8dc4-47a4-a62f-23dd90cfd3fd-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 00:19:55 crc kubenswrapper[4875]: I1126 00:19:55.098902 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vh9kg\" (UniqueName: \"kubernetes.io/projected/a9184a91-8dc4-47a4-a62f-23dd90cfd3fd-kube-api-access-vh9kg\") on node \"crc\" DevicePath \"\"" Nov 26 00:19:55 crc kubenswrapper[4875]: I1126 00:19:55.716096 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-marketplace-9p7k6" event={"ID":"a9184a91-8dc4-47a4-a62f-23dd90cfd3fd","Type":"ContainerDied","Data":"2ff268aca290d6fb614721865166e509518102349782d0ae468182d324e536a0"} Nov 26 00:19:55 crc kubenswrapper[4875]: I1126 00:19:55.716462 4875 scope.go:117] "RemoveContainer" containerID="16e7b57d222f8b3cbe8a3d561c96e1f9a98a55c67cc905fba59da0721ee20516" Nov 26 00:19:55 crc kubenswrapper[4875]: I1126 00:19:55.716142 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-marketplace-9p7k6" Nov 26 00:19:55 crc kubenswrapper[4875]: I1126 00:19:55.734213 4875 scope.go:117] "RemoveContainer" containerID="f4e154008580e6279dace524a49f8b41977fe85e7baa872c33a6093fbf16c848" Nov 26 00:19:55 crc kubenswrapper[4875]: I1126 00:19:55.734630 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-marketplace-9p7k6"] Nov 26 00:19:55 crc kubenswrapper[4875]: I1126 00:19:55.737535 4875 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-marketplace-9p7k6"] Nov 26 00:19:55 crc kubenswrapper[4875]: I1126 00:19:55.745806 4875 scope.go:117] "RemoveContainer" containerID="7f6a065c83fb0961c5c9975f25d18c403b15e58d5c8c1e3783c4dca5cbe09ef8" Nov 26 00:19:57 crc kubenswrapper[4875]: I1126 00:19:57.503712 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210jgt4q"] Nov 26 00:19:57 crc kubenswrapper[4875]: E1126 00:19:57.503942 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9184a91-8dc4-47a4-a62f-23dd90cfd3fd" containerName="extract-content" Nov 26 00:19:57 crc kubenswrapper[4875]: I1126 00:19:57.503958 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9184a91-8dc4-47a4-a62f-23dd90cfd3fd" containerName="extract-content" Nov 26 00:19:57 crc kubenswrapper[4875]: E1126 00:19:57.503971 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9184a91-8dc4-47a4-a62f-23dd90cfd3fd" containerName="registry-server" Nov 26 00:19:57 crc kubenswrapper[4875]: I1126 00:19:57.503979 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="a9184a91-8dc4-47a4-a62f-23dd90cfd3fd" containerName="registry-server" Nov 26 00:19:57 crc kubenswrapper[4875]: E1126 00:19:57.503992 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a9184a91-8dc4-47a4-a62f-23dd90cfd3fd" containerName="extract-utilities" Nov 26 00:19:57 crc kubenswrapper[4875]: I1126 00:19:57.504000 4875 
state_mem.go:107] "Deleted CPUSet assignment" podUID="a9184a91-8dc4-47a4-a62f-23dd90cfd3fd" containerName="extract-utilities" Nov 26 00:19:57 crc kubenswrapper[4875]: I1126 00:19:57.504114 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="a9184a91-8dc4-47a4-a62f-23dd90cfd3fd" containerName="registry-server" Nov 26 00:19:57 crc kubenswrapper[4875]: I1126 00:19:57.504924 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210jgt4q" Nov 26 00:19:57 crc kubenswrapper[4875]: I1126 00:19:57.511751 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 26 00:19:57 crc kubenswrapper[4875]: I1126 00:19:57.516739 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210jgt4q"] Nov 26 00:19:57 crc kubenswrapper[4875]: I1126 00:19:57.537131 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a9184a91-8dc4-47a4-a62f-23dd90cfd3fd" path="/var/lib/kubelet/pods/a9184a91-8dc4-47a4-a62f-23dd90cfd3fd/volumes" Nov 26 00:19:57 crc kubenswrapper[4875]: I1126 00:19:57.627229 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vlb2k\" (UniqueName: \"kubernetes.io/projected/f3e8bc9f-25b1-4d1b-b133-26d144e2c95e-kube-api-access-vlb2k\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210jgt4q\" (UID: \"f3e8bc9f-25b1-4d1b-b133-26d144e2c95e\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210jgt4q" Nov 26 00:19:57 crc kubenswrapper[4875]: I1126 00:19:57.627750 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f3e8bc9f-25b1-4d1b-b133-26d144e2c95e-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210jgt4q\" (UID: \"f3e8bc9f-25b1-4d1b-b133-26d144e2c95e\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210jgt4q" Nov 26 00:19:57 crc kubenswrapper[4875]: I1126 00:19:57.627951 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f3e8bc9f-25b1-4d1b-b133-26d144e2c95e-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210jgt4q\" (UID: \"f3e8bc9f-25b1-4d1b-b133-26d144e2c95e\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210jgt4q" Nov 26 00:19:57 crc kubenswrapper[4875]: I1126 00:19:57.729154 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f3e8bc9f-25b1-4d1b-b133-26d144e2c95e-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210jgt4q\" (UID: \"f3e8bc9f-25b1-4d1b-b133-26d144e2c95e\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210jgt4q" Nov 26 00:19:57 crc kubenswrapper[4875]: I1126 00:19:57.729213 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vlb2k\" (UniqueName: \"kubernetes.io/projected/f3e8bc9f-25b1-4d1b-b133-26d144e2c95e-kube-api-access-vlb2k\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210jgt4q\" (UID: \"f3e8bc9f-25b1-4d1b-b133-26d144e2c95e\") " 
pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210jgt4q" Nov 26 00:19:57 crc kubenswrapper[4875]: I1126 00:19:57.729312 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f3e8bc9f-25b1-4d1b-b133-26d144e2c95e-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210jgt4q\" (UID: \"f3e8bc9f-25b1-4d1b-b133-26d144e2c95e\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210jgt4q" Nov 26 00:19:57 crc kubenswrapper[4875]: I1126 00:19:57.729839 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f3e8bc9f-25b1-4d1b-b133-26d144e2c95e-bundle\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210jgt4q\" (UID: \"f3e8bc9f-25b1-4d1b-b133-26d144e2c95e\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210jgt4q" Nov 26 00:19:57 crc kubenswrapper[4875]: I1126 00:19:57.730301 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f3e8bc9f-25b1-4d1b-b133-26d144e2c95e-util\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210jgt4q\" (UID: \"f3e8bc9f-25b1-4d1b-b133-26d144e2c95e\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210jgt4q" Nov 26 00:19:57 crc kubenswrapper[4875]: I1126 00:19:57.745975 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vlb2k\" (UniqueName: \"kubernetes.io/projected/f3e8bc9f-25b1-4d1b-b133-26d144e2c95e-kube-api-access-vlb2k\") pod \"6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210jgt4q\" (UID: \"f3e8bc9f-25b1-4d1b-b133-26d144e2c95e\") " pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210jgt4q" Nov 26 00:19:57 crc kubenswrapper[4875]: I1126 00:19:57.819857 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210jgt4q" Nov 26 00:19:58 crc kubenswrapper[4875]: I1126 00:19:58.226736 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210jgt4q"] Nov 26 00:19:58 crc kubenswrapper[4875]: I1126 00:19:58.731079 4875 generic.go:334] "Generic (PLEG): container finished" podID="f3e8bc9f-25b1-4d1b-b133-26d144e2c95e" containerID="e95ae9f1971e04928d5b7907e37bdd34394d1d5b10f7eac9debaa62fb95da0cf" exitCode=0 Nov 26 00:19:58 crc kubenswrapper[4875]: I1126 00:19:58.731122 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210jgt4q" event={"ID":"f3e8bc9f-25b1-4d1b-b133-26d144e2c95e","Type":"ContainerDied","Data":"e95ae9f1971e04928d5b7907e37bdd34394d1d5b10f7eac9debaa62fb95da0cf"} Nov 26 00:19:58 crc kubenswrapper[4875]: I1126 00:19:58.731149 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210jgt4q" event={"ID":"f3e8bc9f-25b1-4d1b-b133-26d144e2c95e","Type":"ContainerStarted","Data":"cd66ce96e6742326339c453e4d024c1ca71cae19a250b3a35760fc66dff3e0c5"} Nov 26 00:19:58 crc kubenswrapper[4875]: I1126 00:19:58.732905 4875 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 00:20:00 crc kubenswrapper[4875]: I1126 00:20:00.266786 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-cz5kn"] Nov 26 00:20:00 crc kubenswrapper[4875]: I1126 00:20:00.268341 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cz5kn" Nov 26 00:20:00 crc kubenswrapper[4875]: I1126 00:20:00.274371 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-cz5kn"] Nov 26 00:20:00 crc kubenswrapper[4875]: I1126 00:20:00.460592 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-pzfzt\" (UniqueName: \"kubernetes.io/projected/a1aeac16-3975-48e6-84b4-00aa7aee70a1-kube-api-access-pzfzt\") pod \"redhat-operators-cz5kn\" (UID: \"a1aeac16-3975-48e6-84b4-00aa7aee70a1\") " pod="openshift-marketplace/redhat-operators-cz5kn" Nov 26 00:20:00 crc kubenswrapper[4875]: I1126 00:20:00.460701 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a1aeac16-3975-48e6-84b4-00aa7aee70a1-utilities\") pod \"redhat-operators-cz5kn\" (UID: \"a1aeac16-3975-48e6-84b4-00aa7aee70a1\") " pod="openshift-marketplace/redhat-operators-cz5kn" Nov 26 00:20:00 crc kubenswrapper[4875]: I1126 00:20:00.460743 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a1aeac16-3975-48e6-84b4-00aa7aee70a1-catalog-content\") pod \"redhat-operators-cz5kn\" (UID: \"a1aeac16-3975-48e6-84b4-00aa7aee70a1\") " pod="openshift-marketplace/redhat-operators-cz5kn" Nov 26 00:20:00 crc kubenswrapper[4875]: I1126 00:20:00.561674 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-pzfzt\" (UniqueName: \"kubernetes.io/projected/a1aeac16-3975-48e6-84b4-00aa7aee70a1-kube-api-access-pzfzt\") pod \"redhat-operators-cz5kn\" (UID: 
\"a1aeac16-3975-48e6-84b4-00aa7aee70a1\") " pod="openshift-marketplace/redhat-operators-cz5kn" Nov 26 00:20:00 crc kubenswrapper[4875]: I1126 00:20:00.561770 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a1aeac16-3975-48e6-84b4-00aa7aee70a1-utilities\") pod \"redhat-operators-cz5kn\" (UID: \"a1aeac16-3975-48e6-84b4-00aa7aee70a1\") " pod="openshift-marketplace/redhat-operators-cz5kn" Nov 26 00:20:00 crc kubenswrapper[4875]: I1126 00:20:00.561801 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a1aeac16-3975-48e6-84b4-00aa7aee70a1-catalog-content\") pod \"redhat-operators-cz5kn\" (UID: \"a1aeac16-3975-48e6-84b4-00aa7aee70a1\") " pod="openshift-marketplace/redhat-operators-cz5kn" Nov 26 00:20:00 crc kubenswrapper[4875]: I1126 00:20:00.562327 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a1aeac16-3975-48e6-84b4-00aa7aee70a1-utilities\") pod \"redhat-operators-cz5kn\" (UID: \"a1aeac16-3975-48e6-84b4-00aa7aee70a1\") " pod="openshift-marketplace/redhat-operators-cz5kn" Nov 26 00:20:00 crc kubenswrapper[4875]: I1126 00:20:00.562369 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a1aeac16-3975-48e6-84b4-00aa7aee70a1-catalog-content\") pod \"redhat-operators-cz5kn\" (UID: \"a1aeac16-3975-48e6-84b4-00aa7aee70a1\") " pod="openshift-marketplace/redhat-operators-cz5kn" Nov 26 00:20:00 crc kubenswrapper[4875]: I1126 00:20:00.581184 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-pzfzt\" (UniqueName: \"kubernetes.io/projected/a1aeac16-3975-48e6-84b4-00aa7aee70a1-kube-api-access-pzfzt\") pod \"redhat-operators-cz5kn\" (UID: \"a1aeac16-3975-48e6-84b4-00aa7aee70a1\") " pod="openshift-marketplace/redhat-operators-cz5kn" Nov 26 00:20:00 crc kubenswrapper[4875]: I1126 00:20:00.633827 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-cz5kn" Nov 26 00:20:00 crc kubenswrapper[4875]: I1126 00:20:00.744096 4875 generic.go:334] "Generic (PLEG): container finished" podID="f3e8bc9f-25b1-4d1b-b133-26d144e2c95e" containerID="a197e6105dbc639f8781f75becf7a30480db4211fec950137c49572e9d6a4458" exitCode=0 Nov 26 00:20:00 crc kubenswrapper[4875]: I1126 00:20:00.744154 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210jgt4q" event={"ID":"f3e8bc9f-25b1-4d1b-b133-26d144e2c95e","Type":"ContainerDied","Data":"a197e6105dbc639f8781f75becf7a30480db4211fec950137c49572e9d6a4458"} Nov 26 00:20:01 crc kubenswrapper[4875]: I1126 00:20:01.047331 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-cz5kn"] Nov 26 00:20:01 crc kubenswrapper[4875]: W1126 00:20:01.058584 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda1aeac16_3975_48e6_84b4_00aa7aee70a1.slice/crio-5d2df86196c5b8e305306f5323a5d2e066d641f98f02477345f30ae9873e5ce8 WatchSource:0}: Error finding container 5d2df86196c5b8e305306f5323a5d2e066d641f98f02477345f30ae9873e5ce8: Status 404 returned error can't find the container with id 5d2df86196c5b8e305306f5323a5d2e066d641f98f02477345f30ae9873e5ce8 Nov 26 00:20:01 crc kubenswrapper[4875]: I1126 00:20:01.753652 4875 generic.go:334] "Generic (PLEG): container finished" podID="f3e8bc9f-25b1-4d1b-b133-26d144e2c95e" containerID="5f8eb762ea7d7b432180d33b5226340f03b60858b341cbb1782aa6b0aaaacb52" exitCode=0 Nov 26 00:20:01 crc kubenswrapper[4875]: I1126 00:20:01.753709 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210jgt4q" event={"ID":"f3e8bc9f-25b1-4d1b-b133-26d144e2c95e","Type":"ContainerDied","Data":"5f8eb762ea7d7b432180d33b5226340f03b60858b341cbb1782aa6b0aaaacb52"} Nov 26 00:20:01 crc kubenswrapper[4875]: I1126 00:20:01.756507 4875 generic.go:334] "Generic (PLEG): container finished" podID="a1aeac16-3975-48e6-84b4-00aa7aee70a1" containerID="f427acb753542eb5f8d946cb49b74a5d13d78fd978b299adb6bd5662ab0ed816" exitCode=0 Nov 26 00:20:01 crc kubenswrapper[4875]: I1126 00:20:01.756578 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cz5kn" event={"ID":"a1aeac16-3975-48e6-84b4-00aa7aee70a1","Type":"ContainerDied","Data":"f427acb753542eb5f8d946cb49b74a5d13d78fd978b299adb6bd5662ab0ed816"} Nov 26 00:20:01 crc kubenswrapper[4875]: I1126 00:20:01.756621 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cz5kn" event={"ID":"a1aeac16-3975-48e6-84b4-00aa7aee70a1","Type":"ContainerStarted","Data":"5d2df86196c5b8e305306f5323a5d2e066d641f98f02477345f30ae9873e5ce8"} Nov 26 00:20:03 crc kubenswrapper[4875]: I1126 00:20:03.113215 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210jgt4q" Nov 26 00:20:03 crc kubenswrapper[4875]: I1126 00:20:03.190860 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f3e8bc9f-25b1-4d1b-b133-26d144e2c95e-util\") pod \"f3e8bc9f-25b1-4d1b-b133-26d144e2c95e\" (UID: \"f3e8bc9f-25b1-4d1b-b133-26d144e2c95e\") " Nov 26 00:20:03 crc kubenswrapper[4875]: I1126 00:20:03.190932 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f3e8bc9f-25b1-4d1b-b133-26d144e2c95e-bundle\") pod \"f3e8bc9f-25b1-4d1b-b133-26d144e2c95e\" (UID: \"f3e8bc9f-25b1-4d1b-b133-26d144e2c95e\") " Nov 26 00:20:03 crc kubenswrapper[4875]: I1126 00:20:03.193081 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f3e8bc9f-25b1-4d1b-b133-26d144e2c95e-bundle" (OuterVolumeSpecName: "bundle") pod "f3e8bc9f-25b1-4d1b-b133-26d144e2c95e" (UID: "f3e8bc9f-25b1-4d1b-b133-26d144e2c95e"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:20:03 crc kubenswrapper[4875]: I1126 00:20:03.205139 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/f3e8bc9f-25b1-4d1b-b133-26d144e2c95e-util" (OuterVolumeSpecName: "util") pod "f3e8bc9f-25b1-4d1b-b133-26d144e2c95e" (UID: "f3e8bc9f-25b1-4d1b-b133-26d144e2c95e"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:20:03 crc kubenswrapper[4875]: I1126 00:20:03.292180 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vlb2k\" (UniqueName: \"kubernetes.io/projected/f3e8bc9f-25b1-4d1b-b133-26d144e2c95e-kube-api-access-vlb2k\") pod \"f3e8bc9f-25b1-4d1b-b133-26d144e2c95e\" (UID: \"f3e8bc9f-25b1-4d1b-b133-26d144e2c95e\") " Nov 26 00:20:03 crc kubenswrapper[4875]: I1126 00:20:03.292568 4875 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/f3e8bc9f-25b1-4d1b-b133-26d144e2c95e-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 00:20:03 crc kubenswrapper[4875]: I1126 00:20:03.292594 4875 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/f3e8bc9f-25b1-4d1b-b133-26d144e2c95e-util\") on node \"crc\" DevicePath \"\"" Nov 26 00:20:03 crc kubenswrapper[4875]: I1126 00:20:03.300686 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/f3e8bc9f-25b1-4d1b-b133-26d144e2c95e-kube-api-access-vlb2k" (OuterVolumeSpecName: "kube-api-access-vlb2k") pod "f3e8bc9f-25b1-4d1b-b133-26d144e2c95e" (UID: "f3e8bc9f-25b1-4d1b-b133-26d144e2c95e"). InnerVolumeSpecName "kube-api-access-vlb2k". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:20:03 crc kubenswrapper[4875]: I1126 00:20:03.393911 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vlb2k\" (UniqueName: \"kubernetes.io/projected/f3e8bc9f-25b1-4d1b-b133-26d144e2c95e-kube-api-access-vlb2k\") on node \"crc\" DevicePath \"\"" Nov 26 00:20:03 crc kubenswrapper[4875]: I1126 00:20:03.766888 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210jgt4q" event={"ID":"f3e8bc9f-25b1-4d1b-b133-26d144e2c95e","Type":"ContainerDied","Data":"cd66ce96e6742326339c453e4d024c1ca71cae19a250b3a35760fc66dff3e0c5"} Nov 26 00:20:03 crc kubenswrapper[4875]: I1126 00:20:03.766938 4875 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="cd66ce96e6742326339c453e4d024c1ca71cae19a250b3a35760fc66dff3e0c5" Nov 26 00:20:03 crc kubenswrapper[4875]: I1126 00:20:03.767080 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6c372a8d094fad7255d3bbeabb4914bd2356af7b203a2d2176be1c9210jgt4q" Nov 26 00:20:03 crc kubenswrapper[4875]: I1126 00:20:03.768373 4875 generic.go:334] "Generic (PLEG): container finished" podID="a1aeac16-3975-48e6-84b4-00aa7aee70a1" containerID="ab4081b3c3350348d355210632e5a849764206736123745b137b0ac00c6a3f49" exitCode=0 Nov 26 00:20:03 crc kubenswrapper[4875]: I1126 00:20:03.768420 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cz5kn" event={"ID":"a1aeac16-3975-48e6-84b4-00aa7aee70a1","Type":"ContainerDied","Data":"ab4081b3c3350348d355210632e5a849764206736123745b137b0ac00c6a3f49"} Nov 26 00:20:04 crc kubenswrapper[4875]: I1126 00:20:04.775335 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cz5kn" event={"ID":"a1aeac16-3975-48e6-84b4-00aa7aee70a1","Type":"ContainerStarted","Data":"6e3ae5d3b486fe3b45536f50aaed0acad57d5554f89127acd7425a4999675a7e"} Nov 26 00:20:04 crc kubenswrapper[4875]: I1126 00:20:04.792779 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-cz5kn" podStartSLOduration=2.073000535 podStartE2EDuration="4.792760414s" podCreationTimestamp="2025-11-26 00:20:00 +0000 UTC" firstStartedPulling="2025-11-26 00:20:01.757890614 +0000 UTC m=+774.947866299" lastFinishedPulling="2025-11-26 00:20:04.477650483 +0000 UTC m=+777.667626178" observedRunningTime="2025-11-26 00:20:04.791628164 +0000 UTC m=+777.981603869" watchObservedRunningTime="2025-11-26 00:20:04.792760414 +0000 UTC m=+777.982736109" Nov 26 00:20:04 crc kubenswrapper[4875]: I1126 00:20:04.893394 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5efph57"] Nov 26 00:20:04 crc kubenswrapper[4875]: E1126 00:20:04.893597 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3e8bc9f-25b1-4d1b-b133-26d144e2c95e" containerName="pull" Nov 26 00:20:04 crc kubenswrapper[4875]: I1126 00:20:04.893608 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3e8bc9f-25b1-4d1b-b133-26d144e2c95e" containerName="pull" Nov 26 00:20:04 crc kubenswrapper[4875]: E1126 00:20:04.893620 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3e8bc9f-25b1-4d1b-b133-26d144e2c95e" containerName="extract" Nov 26 00:20:04 crc kubenswrapper[4875]: I1126 00:20:04.893625 4875 state_mem.go:107] "Deleted 
CPUSet assignment" podUID="f3e8bc9f-25b1-4d1b-b133-26d144e2c95e" containerName="extract" Nov 26 00:20:04 crc kubenswrapper[4875]: E1126 00:20:04.893646 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f3e8bc9f-25b1-4d1b-b133-26d144e2c95e" containerName="util" Nov 26 00:20:04 crc kubenswrapper[4875]: I1126 00:20:04.893651 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="f3e8bc9f-25b1-4d1b-b133-26d144e2c95e" containerName="util" Nov 26 00:20:04 crc kubenswrapper[4875]: I1126 00:20:04.893738 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="f3e8bc9f-25b1-4d1b-b133-26d144e2c95e" containerName="extract" Nov 26 00:20:04 crc kubenswrapper[4875]: I1126 00:20:04.894451 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5efph57" Nov 26 00:20:04 crc kubenswrapper[4875]: I1126 00:20:04.896072 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 26 00:20:04 crc kubenswrapper[4875]: I1126 00:20:04.904001 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5efph57"] Nov 26 00:20:05 crc kubenswrapper[4875]: I1126 00:20:05.013443 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/dd60cfa9-3d05-4021-9983-1865cef7200f-bundle\") pod \"8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5efph57\" (UID: \"dd60cfa9-3d05-4021-9983-1865cef7200f\") " pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5efph57" Nov 26 00:20:05 crc kubenswrapper[4875]: I1126 00:20:05.013497 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-m9mqz\" (UniqueName: \"kubernetes.io/projected/dd60cfa9-3d05-4021-9983-1865cef7200f-kube-api-access-m9mqz\") pod \"8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5efph57\" (UID: \"dd60cfa9-3d05-4021-9983-1865cef7200f\") " pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5efph57" Nov 26 00:20:05 crc kubenswrapper[4875]: I1126 00:20:05.013540 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/dd60cfa9-3d05-4021-9983-1865cef7200f-util\") pod \"8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5efph57\" (UID: \"dd60cfa9-3d05-4021-9983-1865cef7200f\") " pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5efph57" Nov 26 00:20:05 crc kubenswrapper[4875]: I1126 00:20:05.115065 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/dd60cfa9-3d05-4021-9983-1865cef7200f-util\") pod \"8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5efph57\" (UID: \"dd60cfa9-3d05-4021-9983-1865cef7200f\") " pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5efph57" Nov 26 00:20:05 crc kubenswrapper[4875]: I1126 00:20:05.115274 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/dd60cfa9-3d05-4021-9983-1865cef7200f-bundle\") pod \"8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5efph57\" (UID: \"dd60cfa9-3d05-4021-9983-1865cef7200f\") " 
pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5efph57" Nov 26 00:20:05 crc kubenswrapper[4875]: I1126 00:20:05.115354 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-m9mqz\" (UniqueName: \"kubernetes.io/projected/dd60cfa9-3d05-4021-9983-1865cef7200f-kube-api-access-m9mqz\") pod \"8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5efph57\" (UID: \"dd60cfa9-3d05-4021-9983-1865cef7200f\") " pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5efph57" Nov 26 00:20:05 crc kubenswrapper[4875]: I1126 00:20:05.115373 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/dd60cfa9-3d05-4021-9983-1865cef7200f-util\") pod \"8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5efph57\" (UID: \"dd60cfa9-3d05-4021-9983-1865cef7200f\") " pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5efph57" Nov 26 00:20:05 crc kubenswrapper[4875]: I1126 00:20:05.115797 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/dd60cfa9-3d05-4021-9983-1865cef7200f-bundle\") pod \"8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5efph57\" (UID: \"dd60cfa9-3d05-4021-9983-1865cef7200f\") " pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5efph57" Nov 26 00:20:05 crc kubenswrapper[4875]: I1126 00:20:05.132855 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-m9mqz\" (UniqueName: \"kubernetes.io/projected/dd60cfa9-3d05-4021-9983-1865cef7200f-kube-api-access-m9mqz\") pod \"8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5efph57\" (UID: \"dd60cfa9-3d05-4021-9983-1865cef7200f\") " pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5efph57" Nov 26 00:20:05 crc kubenswrapper[4875]: I1126 00:20:05.213488 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5efph57" Nov 26 00:20:05 crc kubenswrapper[4875]: I1126 00:20:05.620452 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5efph57"] Nov 26 00:20:05 crc kubenswrapper[4875]: W1126 00:20:05.629211 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddd60cfa9_3d05_4021_9983_1865cef7200f.slice/crio-541845cd17ea38008d4424301dc49a4e331329bbe096fc24b6b04a1d54ee9e84 WatchSource:0}: Error finding container 541845cd17ea38008d4424301dc49a4e331329bbe096fc24b6b04a1d54ee9e84: Status 404 returned error can't find the container with id 541845cd17ea38008d4424301dc49a4e331329bbe096fc24b6b04a1d54ee9e84 Nov 26 00:20:05 crc kubenswrapper[4875]: I1126 00:20:05.781353 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5efph57" event={"ID":"dd60cfa9-3d05-4021-9983-1865cef7200f","Type":"ContainerStarted","Data":"34be9c80aae02862ea8ec01daa34c4836e6fc447c19b50e8354be809e8a7f235"} Nov 26 00:20:05 crc kubenswrapper[4875]: I1126 00:20:05.781397 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5efph57" event={"ID":"dd60cfa9-3d05-4021-9983-1865cef7200f","Type":"ContainerStarted","Data":"541845cd17ea38008d4424301dc49a4e331329bbe096fc24b6b04a1d54ee9e84"} Nov 26 00:20:05 crc kubenswrapper[4875]: I1126 00:20:05.905458 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8ftjbwx"] Nov 26 00:20:05 crc kubenswrapper[4875]: I1126 00:20:05.906999 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8ftjbwx" Nov 26 00:20:05 crc kubenswrapper[4875]: I1126 00:20:05.910468 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8ftjbwx"] Nov 26 00:20:06 crc kubenswrapper[4875]: I1126 00:20:06.024523 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/3f58a827-137d-4b77-bdc6-965e778f362f-bundle\") pod \"6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8ftjbwx\" (UID: \"3f58a827-137d-4b77-bdc6-965e778f362f\") " pod="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8ftjbwx" Nov 26 00:20:06 crc kubenswrapper[4875]: I1126 00:20:06.024580 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-ntn57\" (UniqueName: \"kubernetes.io/projected/3f58a827-137d-4b77-bdc6-965e778f362f-kube-api-access-ntn57\") pod \"6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8ftjbwx\" (UID: \"3f58a827-137d-4b77-bdc6-965e778f362f\") " pod="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8ftjbwx" Nov 26 00:20:06 crc kubenswrapper[4875]: I1126 00:20:06.024599 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/3f58a827-137d-4b77-bdc6-965e778f362f-util\") pod \"6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8ftjbwx\" (UID: \"3f58a827-137d-4b77-bdc6-965e778f362f\") " pod="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8ftjbwx" Nov 26 00:20:06 crc kubenswrapper[4875]: I1126 00:20:06.125232 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/3f58a827-137d-4b77-bdc6-965e778f362f-bundle\") pod \"6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8ftjbwx\" (UID: \"3f58a827-137d-4b77-bdc6-965e778f362f\") " pod="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8ftjbwx" Nov 26 00:20:06 crc kubenswrapper[4875]: I1126 00:20:06.125319 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/3f58a827-137d-4b77-bdc6-965e778f362f-util\") pod \"6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8ftjbwx\" (UID: \"3f58a827-137d-4b77-bdc6-965e778f362f\") " pod="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8ftjbwx" Nov 26 00:20:06 crc kubenswrapper[4875]: I1126 00:20:06.125351 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-ntn57\" (UniqueName: \"kubernetes.io/projected/3f58a827-137d-4b77-bdc6-965e778f362f-kube-api-access-ntn57\") pod \"6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8ftjbwx\" (UID: \"3f58a827-137d-4b77-bdc6-965e778f362f\") " pod="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8ftjbwx" Nov 26 00:20:06 crc kubenswrapper[4875]: I1126 00:20:06.125698 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/3f58a827-137d-4b77-bdc6-965e778f362f-bundle\") pod \"6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8ftjbwx\" (UID: \"3f58a827-137d-4b77-bdc6-965e778f362f\") " 
pod="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8ftjbwx" Nov 26 00:20:06 crc kubenswrapper[4875]: I1126 00:20:06.125716 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/3f58a827-137d-4b77-bdc6-965e778f362f-util\") pod \"6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8ftjbwx\" (UID: \"3f58a827-137d-4b77-bdc6-965e778f362f\") " pod="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8ftjbwx" Nov 26 00:20:06 crc kubenswrapper[4875]: I1126 00:20:06.143912 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-ntn57\" (UniqueName: \"kubernetes.io/projected/3f58a827-137d-4b77-bdc6-965e778f362f-kube-api-access-ntn57\") pod \"6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8ftjbwx\" (UID: \"3f58a827-137d-4b77-bdc6-965e778f362f\") " pod="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8ftjbwx" Nov 26 00:20:06 crc kubenswrapper[4875]: I1126 00:20:06.228286 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8ftjbwx" Nov 26 00:20:06 crc kubenswrapper[4875]: I1126 00:20:06.644487 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8ftjbwx"] Nov 26 00:20:06 crc kubenswrapper[4875]: W1126 00:20:06.654205 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3f58a827_137d_4b77_bdc6_965e778f362f.slice/crio-ac63cfde6ae42f486a001765b179399fb3ac4871cc8172e92193b31107629ded WatchSource:0}: Error finding container ac63cfde6ae42f486a001765b179399fb3ac4871cc8172e92193b31107629ded: Status 404 returned error can't find the container with id ac63cfde6ae42f486a001765b179399fb3ac4871cc8172e92193b31107629ded Nov 26 00:20:06 crc kubenswrapper[4875]: I1126 00:20:06.790465 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8ftjbwx" event={"ID":"3f58a827-137d-4b77-bdc6-965e778f362f","Type":"ContainerStarted","Data":"ac63cfde6ae42f486a001765b179399fb3ac4871cc8172e92193b31107629ded"} Nov 26 00:20:06 crc kubenswrapper[4875]: I1126 00:20:06.793979 4875 generic.go:334] "Generic (PLEG): container finished" podID="dd60cfa9-3d05-4021-9983-1865cef7200f" containerID="34be9c80aae02862ea8ec01daa34c4836e6fc447c19b50e8354be809e8a7f235" exitCode=0 Nov 26 00:20:06 crc kubenswrapper[4875]: I1126 00:20:06.794030 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5efph57" event={"ID":"dd60cfa9-3d05-4021-9983-1865cef7200f","Type":"ContainerDied","Data":"34be9c80aae02862ea8ec01daa34c4836e6fc447c19b50e8354be809e8a7f235"} Nov 26 00:20:07 crc kubenswrapper[4875]: I1126 00:20:07.511905 4875 patch_prober.go:28] interesting pod/machine-config-daemon-9mztb container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 00:20:07 crc kubenswrapper[4875]: I1126 00:20:07.511969 4875 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" 
podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 00:20:07 crc kubenswrapper[4875]: I1126 00:20:07.512072 4875 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" Nov 26 00:20:07 crc kubenswrapper[4875]: I1126 00:20:07.512517 4875 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"4a7640d4f785bf9d0f293ee193979132e8a131e63ea85ce86ad12662c6293db6"} pod="openshift-machine-config-operator/machine-config-daemon-9mztb" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 00:20:07 crc kubenswrapper[4875]: I1126 00:20:07.512583 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" containerName="machine-config-daemon" containerID="cri-o://4a7640d4f785bf9d0f293ee193979132e8a131e63ea85ce86ad12662c6293db6" gracePeriod=600 Nov 26 00:20:07 crc kubenswrapper[4875]: I1126 00:20:07.800968 4875 generic.go:334] "Generic (PLEG): container finished" podID="3f58a827-137d-4b77-bdc6-965e778f362f" containerID="8f69e86f6e26bd9fe966e6373e8113594b846d82f18f999a92ab4dfe8e23dc13" exitCode=0 Nov 26 00:20:07 crc kubenswrapper[4875]: I1126 00:20:07.801085 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8ftjbwx" event={"ID":"3f58a827-137d-4b77-bdc6-965e778f362f","Type":"ContainerDied","Data":"8f69e86f6e26bd9fe966e6373e8113594b846d82f18f999a92ab4dfe8e23dc13"} Nov 26 00:20:07 crc kubenswrapper[4875]: I1126 00:20:07.804887 4875 generic.go:334] "Generic (PLEG): container finished" podID="dd60cfa9-3d05-4021-9983-1865cef7200f" containerID="e44cbe154a7203d147fd381262d3e1a0bf50b1dddfe99a26ae649533325c525d" exitCode=0 Nov 26 00:20:07 crc kubenswrapper[4875]: I1126 00:20:07.804934 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5efph57" event={"ID":"dd60cfa9-3d05-4021-9983-1865cef7200f","Type":"ContainerDied","Data":"e44cbe154a7203d147fd381262d3e1a0bf50b1dddfe99a26ae649533325c525d"} Nov 26 00:20:07 crc kubenswrapper[4875]: I1126 00:20:07.810267 4875 generic.go:334] "Generic (PLEG): container finished" podID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" containerID="4a7640d4f785bf9d0f293ee193979132e8a131e63ea85ce86ad12662c6293db6" exitCode=0 Nov 26 00:20:07 crc kubenswrapper[4875]: I1126 00:20:07.810302 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" event={"ID":"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e","Type":"ContainerDied","Data":"4a7640d4f785bf9d0f293ee193979132e8a131e63ea85ce86ad12662c6293db6"} Nov 26 00:20:07 crc kubenswrapper[4875]: I1126 00:20:07.810378 4875 scope.go:117] "RemoveContainer" containerID="c8a7242997bc14a8e06319ccddca75cbab2251128f6918fc296f21726f9be284" Nov 26 00:20:08 crc kubenswrapper[4875]: I1126 00:20:08.822115 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5efph57" 
event={"ID":"dd60cfa9-3d05-4021-9983-1865cef7200f","Type":"ContainerStarted","Data":"00579fa2d1a78aa621ed665549c1fac2c803ad4cc9410a9e52aea7fa36428779"} Nov 26 00:20:08 crc kubenswrapper[4875]: I1126 00:20:08.825834 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" event={"ID":"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e","Type":"ContainerStarted","Data":"46c47342b308d949e370e86db45ad1b54d496a9856e0ad49658508d0aa116ac9"} Nov 26 00:20:08 crc kubenswrapper[4875]: I1126 00:20:08.853835 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5efph57" podStartSLOduration=4.141837302 podStartE2EDuration="4.85381703s" podCreationTimestamp="2025-11-26 00:20:04 +0000 UTC" firstStartedPulling="2025-11-26 00:20:06.797349581 +0000 UTC m=+779.987325276" lastFinishedPulling="2025-11-26 00:20:07.509329309 +0000 UTC m=+780.699305004" observedRunningTime="2025-11-26 00:20:08.840300802 +0000 UTC m=+782.030276497" watchObservedRunningTime="2025-11-26 00:20:08.85381703 +0000 UTC m=+782.043792725" Nov 26 00:20:09 crc kubenswrapper[4875]: I1126 00:20:09.662836 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-ptghh"] Nov 26 00:20:09 crc kubenswrapper[4875]: I1126 00:20:09.664266 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ptghh" Nov 26 00:20:09 crc kubenswrapper[4875]: I1126 00:20:09.675436 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-77c5t\" (UniqueName: \"kubernetes.io/projected/0cdb1c9a-f742-4e18-9e17-3bed2687ff54-kube-api-access-77c5t\") pod \"certified-operators-ptghh\" (UID: \"0cdb1c9a-f742-4e18-9e17-3bed2687ff54\") " pod="openshift-marketplace/certified-operators-ptghh" Nov 26 00:20:09 crc kubenswrapper[4875]: I1126 00:20:09.675516 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0cdb1c9a-f742-4e18-9e17-3bed2687ff54-catalog-content\") pod \"certified-operators-ptghh\" (UID: \"0cdb1c9a-f742-4e18-9e17-3bed2687ff54\") " pod="openshift-marketplace/certified-operators-ptghh" Nov 26 00:20:09 crc kubenswrapper[4875]: I1126 00:20:09.675569 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0cdb1c9a-f742-4e18-9e17-3bed2687ff54-utilities\") pod \"certified-operators-ptghh\" (UID: \"0cdb1c9a-f742-4e18-9e17-3bed2687ff54\") " pod="openshift-marketplace/certified-operators-ptghh" Nov 26 00:20:09 crc kubenswrapper[4875]: I1126 00:20:09.676610 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ptghh"] Nov 26 00:20:09 crc kubenswrapper[4875]: I1126 00:20:09.776742 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0cdb1c9a-f742-4e18-9e17-3bed2687ff54-catalog-content\") pod \"certified-operators-ptghh\" (UID: \"0cdb1c9a-f742-4e18-9e17-3bed2687ff54\") " pod="openshift-marketplace/certified-operators-ptghh" Nov 26 00:20:09 crc kubenswrapper[4875]: I1126 00:20:09.776817 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/0cdb1c9a-f742-4e18-9e17-3bed2687ff54-utilities\") pod \"certified-operators-ptghh\" (UID: \"0cdb1c9a-f742-4e18-9e17-3bed2687ff54\") " pod="openshift-marketplace/certified-operators-ptghh" Nov 26 00:20:09 crc kubenswrapper[4875]: I1126 00:20:09.776862 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-77c5t\" (UniqueName: \"kubernetes.io/projected/0cdb1c9a-f742-4e18-9e17-3bed2687ff54-kube-api-access-77c5t\") pod \"certified-operators-ptghh\" (UID: \"0cdb1c9a-f742-4e18-9e17-3bed2687ff54\") " pod="openshift-marketplace/certified-operators-ptghh" Nov 26 00:20:09 crc kubenswrapper[4875]: I1126 00:20:09.777372 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0cdb1c9a-f742-4e18-9e17-3bed2687ff54-utilities\") pod \"certified-operators-ptghh\" (UID: \"0cdb1c9a-f742-4e18-9e17-3bed2687ff54\") " pod="openshift-marketplace/certified-operators-ptghh" Nov 26 00:20:09 crc kubenswrapper[4875]: I1126 00:20:09.777697 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0cdb1c9a-f742-4e18-9e17-3bed2687ff54-catalog-content\") pod \"certified-operators-ptghh\" (UID: \"0cdb1c9a-f742-4e18-9e17-3bed2687ff54\") " pod="openshift-marketplace/certified-operators-ptghh" Nov 26 00:20:09 crc kubenswrapper[4875]: I1126 00:20:09.805674 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-77c5t\" (UniqueName: \"kubernetes.io/projected/0cdb1c9a-f742-4e18-9e17-3bed2687ff54-kube-api-access-77c5t\") pod \"certified-operators-ptghh\" (UID: \"0cdb1c9a-f742-4e18-9e17-3bed2687ff54\") " pod="openshift-marketplace/certified-operators-ptghh" Nov 26 00:20:09 crc kubenswrapper[4875]: I1126 00:20:09.831944 4875 generic.go:334] "Generic (PLEG): container finished" podID="3f58a827-137d-4b77-bdc6-965e778f362f" containerID="b2a0f37601963a19ac54d76aeb7adb4b4ad13c81753548df87df709be27d73cc" exitCode=0 Nov 26 00:20:09 crc kubenswrapper[4875]: I1126 00:20:09.831996 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8ftjbwx" event={"ID":"3f58a827-137d-4b77-bdc6-965e778f362f","Type":"ContainerDied","Data":"b2a0f37601963a19ac54d76aeb7adb4b4ad13c81753548df87df709be27d73cc"} Nov 26 00:20:09 crc kubenswrapper[4875]: I1126 00:20:09.835765 4875 generic.go:334] "Generic (PLEG): container finished" podID="dd60cfa9-3d05-4021-9983-1865cef7200f" containerID="00579fa2d1a78aa621ed665549c1fac2c803ad4cc9410a9e52aea7fa36428779" exitCode=0 Nov 26 00:20:09 crc kubenswrapper[4875]: I1126 00:20:09.836313 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5efph57" event={"ID":"dd60cfa9-3d05-4021-9983-1865cef7200f","Type":"ContainerDied","Data":"00579fa2d1a78aa621ed665549c1fac2c803ad4cc9410a9e52aea7fa36428779"} Nov 26 00:20:10 crc kubenswrapper[4875]: I1126 00:20:10.043799 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-ptghh" Nov 26 00:20:10 crc kubenswrapper[4875]: I1126 00:20:10.530345 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-ptghh"] Nov 26 00:20:10 crc kubenswrapper[4875]: I1126 00:20:10.634579 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-cz5kn" Nov 26 00:20:10 crc kubenswrapper[4875]: I1126 00:20:10.634921 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-cz5kn" Nov 26 00:20:10 crc kubenswrapper[4875]: I1126 00:20:10.728152 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-cz5kn" Nov 26 00:20:10 crc kubenswrapper[4875]: I1126 00:20:10.841380 4875 generic.go:334] "Generic (PLEG): container finished" podID="0cdb1c9a-f742-4e18-9e17-3bed2687ff54" containerID="33eec501934da5fd333f24bbbf4a37541f6a15a05e0136fd375845162a8f1e5a" exitCode=0 Nov 26 00:20:10 crc kubenswrapper[4875]: I1126 00:20:10.841459 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ptghh" event={"ID":"0cdb1c9a-f742-4e18-9e17-3bed2687ff54","Type":"ContainerDied","Data":"33eec501934da5fd333f24bbbf4a37541f6a15a05e0136fd375845162a8f1e5a"} Nov 26 00:20:10 crc kubenswrapper[4875]: I1126 00:20:10.841492 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ptghh" event={"ID":"0cdb1c9a-f742-4e18-9e17-3bed2687ff54","Type":"ContainerStarted","Data":"9e85e825c175f1c7cfdbad6eed46d4a1eb363ecdb6ddc986f70275cbdf67ffee"} Nov 26 00:20:10 crc kubenswrapper[4875]: I1126 00:20:10.844839 4875 generic.go:334] "Generic (PLEG): container finished" podID="3f58a827-137d-4b77-bdc6-965e778f362f" containerID="cc28e7871222b59b3ea332822fb05781121fc0f1a9aa74025f4bc4093e7c29f4" exitCode=0 Nov 26 00:20:10 crc kubenswrapper[4875]: I1126 00:20:10.844904 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8ftjbwx" event={"ID":"3f58a827-137d-4b77-bdc6-965e778f362f","Type":"ContainerDied","Data":"cc28e7871222b59b3ea332822fb05781121fc0f1a9aa74025f4bc4093e7c29f4"} Nov 26 00:20:10 crc kubenswrapper[4875]: I1126 00:20:10.924055 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-cz5kn" Nov 26 00:20:11 crc kubenswrapper[4875]: I1126 00:20:11.377652 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5efph57" Nov 26 00:20:11 crc kubenswrapper[4875]: I1126 00:20:11.494741 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/dd60cfa9-3d05-4021-9983-1865cef7200f-bundle\") pod \"dd60cfa9-3d05-4021-9983-1865cef7200f\" (UID: \"dd60cfa9-3d05-4021-9983-1865cef7200f\") " Nov 26 00:20:11 crc kubenswrapper[4875]: I1126 00:20:11.494792 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/dd60cfa9-3d05-4021-9983-1865cef7200f-util\") pod \"dd60cfa9-3d05-4021-9983-1865cef7200f\" (UID: \"dd60cfa9-3d05-4021-9983-1865cef7200f\") " Nov 26 00:20:11 crc kubenswrapper[4875]: I1126 00:20:11.494822 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-m9mqz\" (UniqueName: \"kubernetes.io/projected/dd60cfa9-3d05-4021-9983-1865cef7200f-kube-api-access-m9mqz\") pod \"dd60cfa9-3d05-4021-9983-1865cef7200f\" (UID: \"dd60cfa9-3d05-4021-9983-1865cef7200f\") " Nov 26 00:20:11 crc kubenswrapper[4875]: I1126 00:20:11.499479 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dd60cfa9-3d05-4021-9983-1865cef7200f-bundle" (OuterVolumeSpecName: "bundle") pod "dd60cfa9-3d05-4021-9983-1865cef7200f" (UID: "dd60cfa9-3d05-4021-9983-1865cef7200f"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:20:11 crc kubenswrapper[4875]: I1126 00:20:11.505643 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dd60cfa9-3d05-4021-9983-1865cef7200f-kube-api-access-m9mqz" (OuterVolumeSpecName: "kube-api-access-m9mqz") pod "dd60cfa9-3d05-4021-9983-1865cef7200f" (UID: "dd60cfa9-3d05-4021-9983-1865cef7200f"). InnerVolumeSpecName "kube-api-access-m9mqz". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:20:11 crc kubenswrapper[4875]: I1126 00:20:11.508812 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dd60cfa9-3d05-4021-9983-1865cef7200f-util" (OuterVolumeSpecName: "util") pod "dd60cfa9-3d05-4021-9983-1865cef7200f" (UID: "dd60cfa9-3d05-4021-9983-1865cef7200f"). InnerVolumeSpecName "util". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:20:11 crc kubenswrapper[4875]: I1126 00:20:11.596254 4875 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/dd60cfa9-3d05-4021-9983-1865cef7200f-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 00:20:11 crc kubenswrapper[4875]: I1126 00:20:11.596306 4875 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/dd60cfa9-3d05-4021-9983-1865cef7200f-util\") on node \"crc\" DevicePath \"\"" Nov 26 00:20:11 crc kubenswrapper[4875]: I1126 00:20:11.596319 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-m9mqz\" (UniqueName: \"kubernetes.io/projected/dd60cfa9-3d05-4021-9983-1865cef7200f-kube-api-access-m9mqz\") on node \"crc\" DevicePath \"\"" Nov 26 00:20:11 crc kubenswrapper[4875]: I1126 00:20:11.865744 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5efph57" Nov 26 00:20:11 crc kubenswrapper[4875]: I1126 00:20:11.865853 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/8ed862a309935d5a1c8012df79b93f7fb46e029d4689f7f6ddcb9e7f5efph57" event={"ID":"dd60cfa9-3d05-4021-9983-1865cef7200f","Type":"ContainerDied","Data":"541845cd17ea38008d4424301dc49a4e331329bbe096fc24b6b04a1d54ee9e84"} Nov 26 00:20:11 crc kubenswrapper[4875]: I1126 00:20:11.866839 4875 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="541845cd17ea38008d4424301dc49a4e331329bbe096fc24b6b04a1d54ee9e84" Nov 26 00:20:11 crc kubenswrapper[4875]: I1126 00:20:11.869521 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ptghh" event={"ID":"0cdb1c9a-f742-4e18-9e17-3bed2687ff54","Type":"ContainerStarted","Data":"fd0d75a072e5c664a2cb71353bcaec9ac21de36d5fbdb70609c449d02ccff059"} Nov 26 00:20:11 crc kubenswrapper[4875]: I1126 00:20:11.878483 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-cz5kn"] Nov 26 00:20:12 crc kubenswrapper[4875]: I1126 00:20:12.336271 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8ftjbwx" Nov 26 00:20:12 crc kubenswrapper[4875]: I1126 00:20:12.505822 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-ntn57\" (UniqueName: \"kubernetes.io/projected/3f58a827-137d-4b77-bdc6-965e778f362f-kube-api-access-ntn57\") pod \"3f58a827-137d-4b77-bdc6-965e778f362f\" (UID: \"3f58a827-137d-4b77-bdc6-965e778f362f\") " Nov 26 00:20:12 crc kubenswrapper[4875]: I1126 00:20:12.505885 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/3f58a827-137d-4b77-bdc6-965e778f362f-util\") pod \"3f58a827-137d-4b77-bdc6-965e778f362f\" (UID: \"3f58a827-137d-4b77-bdc6-965e778f362f\") " Nov 26 00:20:12 crc kubenswrapper[4875]: I1126 00:20:12.505954 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/3f58a827-137d-4b77-bdc6-965e778f362f-bundle\") pod \"3f58a827-137d-4b77-bdc6-965e778f362f\" (UID: \"3f58a827-137d-4b77-bdc6-965e778f362f\") " Nov 26 00:20:12 crc kubenswrapper[4875]: I1126 00:20:12.506629 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3f58a827-137d-4b77-bdc6-965e778f362f-bundle" (OuterVolumeSpecName: "bundle") pod "3f58a827-137d-4b77-bdc6-965e778f362f" (UID: "3f58a827-137d-4b77-bdc6-965e778f362f"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:20:12 crc kubenswrapper[4875]: I1126 00:20:12.517481 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3f58a827-137d-4b77-bdc6-965e778f362f-util" (OuterVolumeSpecName: "util") pod "3f58a827-137d-4b77-bdc6-965e778f362f" (UID: "3f58a827-137d-4b77-bdc6-965e778f362f"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:20:12 crc kubenswrapper[4875]: I1126 00:20:12.521020 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3f58a827-137d-4b77-bdc6-965e778f362f-kube-api-access-ntn57" (OuterVolumeSpecName: "kube-api-access-ntn57") pod "3f58a827-137d-4b77-bdc6-965e778f362f" (UID: "3f58a827-137d-4b77-bdc6-965e778f362f"). InnerVolumeSpecName "kube-api-access-ntn57". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:20:12 crc kubenswrapper[4875]: I1126 00:20:12.607363 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-ntn57\" (UniqueName: \"kubernetes.io/projected/3f58a827-137d-4b77-bdc6-965e778f362f-kube-api-access-ntn57\") on node \"crc\" DevicePath \"\"" Nov 26 00:20:12 crc kubenswrapper[4875]: I1126 00:20:12.607401 4875 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/3f58a827-137d-4b77-bdc6-965e778f362f-util\") on node \"crc\" DevicePath \"\"" Nov 26 00:20:12 crc kubenswrapper[4875]: I1126 00:20:12.607428 4875 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/3f58a827-137d-4b77-bdc6-965e778f362f-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 00:20:12 crc kubenswrapper[4875]: I1126 00:20:12.875139 4875 generic.go:334] "Generic (PLEG): container finished" podID="0cdb1c9a-f742-4e18-9e17-3bed2687ff54" containerID="fd0d75a072e5c664a2cb71353bcaec9ac21de36d5fbdb70609c449d02ccff059" exitCode=0 Nov 26 00:20:12 crc kubenswrapper[4875]: I1126 00:20:12.875215 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ptghh" event={"ID":"0cdb1c9a-f742-4e18-9e17-3bed2687ff54","Type":"ContainerDied","Data":"fd0d75a072e5c664a2cb71353bcaec9ac21de36d5fbdb70609c449d02ccff059"} Nov 26 00:20:12 crc kubenswrapper[4875]: I1126 00:20:12.877671 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8ftjbwx" Nov 26 00:20:12 crc kubenswrapper[4875]: I1126 00:20:12.877708 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/6e3e74c24700cc2bb66271d960117ff0976dc779e6a3bc37905b952e8ftjbwx" event={"ID":"3f58a827-137d-4b77-bdc6-965e778f362f","Type":"ContainerDied","Data":"ac63cfde6ae42f486a001765b179399fb3ac4871cc8172e92193b31107629ded"} Nov 26 00:20:12 crc kubenswrapper[4875]: I1126 00:20:12.877729 4875 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="ac63cfde6ae42f486a001765b179399fb3ac4871cc8172e92193b31107629ded" Nov 26 00:20:12 crc kubenswrapper[4875]: I1126 00:20:12.877769 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-cz5kn" podUID="a1aeac16-3975-48e6-84b4-00aa7aee70a1" containerName="registry-server" containerID="cri-o://6e3ae5d3b486fe3b45536f50aaed0acad57d5554f89127acd7425a4999675a7e" gracePeriod=2 Nov 26 00:20:13 crc kubenswrapper[4875]: I1126 00:20:13.740394 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931amv77c"] Nov 26 00:20:13 crc kubenswrapper[4875]: E1126 00:20:13.741026 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd60cfa9-3d05-4021-9983-1865cef7200f" containerName="extract" Nov 26 00:20:13 crc kubenswrapper[4875]: I1126 00:20:13.741046 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd60cfa9-3d05-4021-9983-1865cef7200f" containerName="extract" Nov 26 00:20:13 crc kubenswrapper[4875]: E1126 00:20:13.741062 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f58a827-137d-4b77-bdc6-965e778f362f" containerName="util" Nov 26 00:20:13 crc kubenswrapper[4875]: I1126 00:20:13.741072 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f58a827-137d-4b77-bdc6-965e778f362f" containerName="util" Nov 26 00:20:13 crc kubenswrapper[4875]: E1126 00:20:13.741086 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f58a827-137d-4b77-bdc6-965e778f362f" containerName="extract" Nov 26 00:20:13 crc kubenswrapper[4875]: I1126 00:20:13.741106 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f58a827-137d-4b77-bdc6-965e778f362f" containerName="extract" Nov 26 00:20:13 crc kubenswrapper[4875]: E1126 00:20:13.741115 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd60cfa9-3d05-4021-9983-1865cef7200f" containerName="pull" Nov 26 00:20:13 crc kubenswrapper[4875]: I1126 00:20:13.741121 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd60cfa9-3d05-4021-9983-1865cef7200f" containerName="pull" Nov 26 00:20:13 crc kubenswrapper[4875]: E1126 00:20:13.741131 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dd60cfa9-3d05-4021-9983-1865cef7200f" containerName="util" Nov 26 00:20:13 crc kubenswrapper[4875]: I1126 00:20:13.741139 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="dd60cfa9-3d05-4021-9983-1865cef7200f" containerName="util" Nov 26 00:20:13 crc kubenswrapper[4875]: E1126 00:20:13.741157 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="3f58a827-137d-4b77-bdc6-965e778f362f" containerName="pull" Nov 26 00:20:13 crc kubenswrapper[4875]: I1126 00:20:13.741164 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="3f58a827-137d-4b77-bdc6-965e778f362f" containerName="pull" Nov 26 00:20:13 crc 
kubenswrapper[4875]: I1126 00:20:13.741278 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="dd60cfa9-3d05-4021-9983-1865cef7200f" containerName="extract" Nov 26 00:20:13 crc kubenswrapper[4875]: I1126 00:20:13.741293 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="3f58a827-137d-4b77-bdc6-965e778f362f" containerName="extract" Nov 26 00:20:13 crc kubenswrapper[4875]: I1126 00:20:13.742084 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931amv77c" Nov 26 00:20:13 crc kubenswrapper[4875]: I1126 00:20:13.745489 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"default-dockercfg-vmwhc" Nov 26 00:20:13 crc kubenswrapper[4875]: I1126 00:20:13.754578 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931amv77c"] Nov 26 00:20:13 crc kubenswrapper[4875]: I1126 00:20:13.830234 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-cz5kn" Nov 26 00:20:13 crc kubenswrapper[4875]: I1126 00:20:13.890674 4875 generic.go:334] "Generic (PLEG): container finished" podID="a1aeac16-3975-48e6-84b4-00aa7aee70a1" containerID="6e3ae5d3b486fe3b45536f50aaed0acad57d5554f89127acd7425a4999675a7e" exitCode=0 Nov 26 00:20:13 crc kubenswrapper[4875]: I1126 00:20:13.890732 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cz5kn" event={"ID":"a1aeac16-3975-48e6-84b4-00aa7aee70a1","Type":"ContainerDied","Data":"6e3ae5d3b486fe3b45536f50aaed0acad57d5554f89127acd7425a4999675a7e"} Nov 26 00:20:13 crc kubenswrapper[4875]: I1126 00:20:13.890761 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-cz5kn" event={"ID":"a1aeac16-3975-48e6-84b4-00aa7aee70a1","Type":"ContainerDied","Data":"5d2df86196c5b8e305306f5323a5d2e066d641f98f02477345f30ae9873e5ce8"} Nov 26 00:20:13 crc kubenswrapper[4875]: I1126 00:20:13.890779 4875 scope.go:117] "RemoveContainer" containerID="6e3ae5d3b486fe3b45536f50aaed0acad57d5554f89127acd7425a4999675a7e" Nov 26 00:20:13 crc kubenswrapper[4875]: I1126 00:20:13.890886 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-cz5kn" Nov 26 00:20:13 crc kubenswrapper[4875]: I1126 00:20:13.893850 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ptghh" event={"ID":"0cdb1c9a-f742-4e18-9e17-3bed2687ff54","Type":"ContainerStarted","Data":"32ecf1684834de5de51bd29abf4d72600f538d35c581477344b3661d184c40a7"} Nov 26 00:20:13 crc kubenswrapper[4875]: I1126 00:20:13.907302 4875 scope.go:117] "RemoveContainer" containerID="ab4081b3c3350348d355210632e5a849764206736123745b137b0ac00c6a3f49" Nov 26 00:20:13 crc kubenswrapper[4875]: I1126 00:20:13.914980 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-ptghh" podStartSLOduration=2.394130014 podStartE2EDuration="4.914964859s" podCreationTimestamp="2025-11-26 00:20:09 +0000 UTC" firstStartedPulling="2025-11-26 00:20:10.842846993 +0000 UTC m=+784.032822688" lastFinishedPulling="2025-11-26 00:20:13.363681828 +0000 UTC m=+786.553657533" observedRunningTime="2025-11-26 00:20:13.914168417 +0000 UTC m=+787.104144112" watchObservedRunningTime="2025-11-26 00:20:13.914964859 +0000 UTC m=+787.104940554" Nov 26 00:20:13 crc kubenswrapper[4875]: I1126 00:20:13.928344 4875 scope.go:117] "RemoveContainer" containerID="f427acb753542eb5f8d946cb49b74a5d13d78fd978b299adb6bd5662ab0ed816" Nov 26 00:20:13 crc kubenswrapper[4875]: I1126 00:20:13.928340 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0255b072-0216-4a62-891e-dd3e301cf793-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931amv77c\" (UID: \"0255b072-0216-4a62-891e-dd3e301cf793\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931amv77c" Nov 26 00:20:13 crc kubenswrapper[4875]: I1126 00:20:13.928462 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0255b072-0216-4a62-891e-dd3e301cf793-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931amv77c\" (UID: \"0255b072-0216-4a62-891e-dd3e301cf793\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931amv77c" Nov 26 00:20:13 crc kubenswrapper[4875]: I1126 00:20:13.928496 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-slv96\" (UniqueName: \"kubernetes.io/projected/0255b072-0216-4a62-891e-dd3e301cf793-kube-api-access-slv96\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931amv77c\" (UID: \"0255b072-0216-4a62-891e-dd3e301cf793\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931amv77c" Nov 26 00:20:13 crc kubenswrapper[4875]: I1126 00:20:13.947608 4875 scope.go:117] "RemoveContainer" containerID="6e3ae5d3b486fe3b45536f50aaed0acad57d5554f89127acd7425a4999675a7e" Nov 26 00:20:13 crc kubenswrapper[4875]: E1126 00:20:13.947944 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6e3ae5d3b486fe3b45536f50aaed0acad57d5554f89127acd7425a4999675a7e\": container with ID starting with 6e3ae5d3b486fe3b45536f50aaed0acad57d5554f89127acd7425a4999675a7e not found: ID does not exist" containerID="6e3ae5d3b486fe3b45536f50aaed0acad57d5554f89127acd7425a4999675a7e" Nov 26 00:20:13 crc kubenswrapper[4875]: I1126 00:20:13.947977 4875 
pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6e3ae5d3b486fe3b45536f50aaed0acad57d5554f89127acd7425a4999675a7e"} err="failed to get container status \"6e3ae5d3b486fe3b45536f50aaed0acad57d5554f89127acd7425a4999675a7e\": rpc error: code = NotFound desc = could not find container \"6e3ae5d3b486fe3b45536f50aaed0acad57d5554f89127acd7425a4999675a7e\": container with ID starting with 6e3ae5d3b486fe3b45536f50aaed0acad57d5554f89127acd7425a4999675a7e not found: ID does not exist" Nov 26 00:20:13 crc kubenswrapper[4875]: I1126 00:20:13.948001 4875 scope.go:117] "RemoveContainer" containerID="ab4081b3c3350348d355210632e5a849764206736123745b137b0ac00c6a3f49" Nov 26 00:20:13 crc kubenswrapper[4875]: E1126 00:20:13.948214 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ab4081b3c3350348d355210632e5a849764206736123745b137b0ac00c6a3f49\": container with ID starting with ab4081b3c3350348d355210632e5a849764206736123745b137b0ac00c6a3f49 not found: ID does not exist" containerID="ab4081b3c3350348d355210632e5a849764206736123745b137b0ac00c6a3f49" Nov 26 00:20:13 crc kubenswrapper[4875]: I1126 00:20:13.948239 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ab4081b3c3350348d355210632e5a849764206736123745b137b0ac00c6a3f49"} err="failed to get container status \"ab4081b3c3350348d355210632e5a849764206736123745b137b0ac00c6a3f49\": rpc error: code = NotFound desc = could not find container \"ab4081b3c3350348d355210632e5a849764206736123745b137b0ac00c6a3f49\": container with ID starting with ab4081b3c3350348d355210632e5a849764206736123745b137b0ac00c6a3f49 not found: ID does not exist" Nov 26 00:20:13 crc kubenswrapper[4875]: I1126 00:20:13.948256 4875 scope.go:117] "RemoveContainer" containerID="f427acb753542eb5f8d946cb49b74a5d13d78fd978b299adb6bd5662ab0ed816" Nov 26 00:20:13 crc kubenswrapper[4875]: E1126 00:20:13.948480 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f427acb753542eb5f8d946cb49b74a5d13d78fd978b299adb6bd5662ab0ed816\": container with ID starting with f427acb753542eb5f8d946cb49b74a5d13d78fd978b299adb6bd5662ab0ed816 not found: ID does not exist" containerID="f427acb753542eb5f8d946cb49b74a5d13d78fd978b299adb6bd5662ab0ed816" Nov 26 00:20:13 crc kubenswrapper[4875]: I1126 00:20:13.948542 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f427acb753542eb5f8d946cb49b74a5d13d78fd978b299adb6bd5662ab0ed816"} err="failed to get container status \"f427acb753542eb5f8d946cb49b74a5d13d78fd978b299adb6bd5662ab0ed816\": rpc error: code = NotFound desc = could not find container \"f427acb753542eb5f8d946cb49b74a5d13d78fd978b299adb6bd5662ab0ed816\": container with ID starting with f427acb753542eb5f8d946cb49b74a5d13d78fd978b299adb6bd5662ab0ed816 not found: ID does not exist" Nov 26 00:20:14 crc kubenswrapper[4875]: I1126 00:20:14.029074 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-pzfzt\" (UniqueName: \"kubernetes.io/projected/a1aeac16-3975-48e6-84b4-00aa7aee70a1-kube-api-access-pzfzt\") pod \"a1aeac16-3975-48e6-84b4-00aa7aee70a1\" (UID: \"a1aeac16-3975-48e6-84b4-00aa7aee70a1\") " Nov 26 00:20:14 crc kubenswrapper[4875]: I1126 00:20:14.029154 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: 
\"kubernetes.io/empty-dir/a1aeac16-3975-48e6-84b4-00aa7aee70a1-utilities\") pod \"a1aeac16-3975-48e6-84b4-00aa7aee70a1\" (UID: \"a1aeac16-3975-48e6-84b4-00aa7aee70a1\") " Nov 26 00:20:14 crc kubenswrapper[4875]: I1126 00:20:14.029222 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a1aeac16-3975-48e6-84b4-00aa7aee70a1-catalog-content\") pod \"a1aeac16-3975-48e6-84b4-00aa7aee70a1\" (UID: \"a1aeac16-3975-48e6-84b4-00aa7aee70a1\") " Nov 26 00:20:14 crc kubenswrapper[4875]: I1126 00:20:14.029390 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0255b072-0216-4a62-891e-dd3e301cf793-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931amv77c\" (UID: \"0255b072-0216-4a62-891e-dd3e301cf793\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931amv77c" Nov 26 00:20:14 crc kubenswrapper[4875]: I1126 00:20:14.029427 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0255b072-0216-4a62-891e-dd3e301cf793-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931amv77c\" (UID: \"0255b072-0216-4a62-891e-dd3e301cf793\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931amv77c" Nov 26 00:20:14 crc kubenswrapper[4875]: I1126 00:20:14.029454 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-slv96\" (UniqueName: \"kubernetes.io/projected/0255b072-0216-4a62-891e-dd3e301cf793-kube-api-access-slv96\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931amv77c\" (UID: \"0255b072-0216-4a62-891e-dd3e301cf793\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931amv77c" Nov 26 00:20:14 crc kubenswrapper[4875]: I1126 00:20:14.030282 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a1aeac16-3975-48e6-84b4-00aa7aee70a1-utilities" (OuterVolumeSpecName: "utilities") pod "a1aeac16-3975-48e6-84b4-00aa7aee70a1" (UID: "a1aeac16-3975-48e6-84b4-00aa7aee70a1"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:20:14 crc kubenswrapper[4875]: I1126 00:20:14.030337 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0255b072-0216-4a62-891e-dd3e301cf793-util\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931amv77c\" (UID: \"0255b072-0216-4a62-891e-dd3e301cf793\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931amv77c" Nov 26 00:20:14 crc kubenswrapper[4875]: I1126 00:20:14.030716 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0255b072-0216-4a62-891e-dd3e301cf793-bundle\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931amv77c\" (UID: \"0255b072-0216-4a62-891e-dd3e301cf793\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931amv77c" Nov 26 00:20:14 crc kubenswrapper[4875]: I1126 00:20:14.035532 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/a1aeac16-3975-48e6-84b4-00aa7aee70a1-kube-api-access-pzfzt" (OuterVolumeSpecName: "kube-api-access-pzfzt") pod "a1aeac16-3975-48e6-84b4-00aa7aee70a1" (UID: "a1aeac16-3975-48e6-84b4-00aa7aee70a1"). InnerVolumeSpecName "kube-api-access-pzfzt". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:20:14 crc kubenswrapper[4875]: I1126 00:20:14.064370 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-slv96\" (UniqueName: \"kubernetes.io/projected/0255b072-0216-4a62-891e-dd3e301cf793-kube-api-access-slv96\") pod \"1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931amv77c\" (UID: \"0255b072-0216-4a62-891e-dd3e301cf793\") " pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931amv77c" Nov 26 00:20:14 crc kubenswrapper[4875]: I1126 00:20:14.131018 4875 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/a1aeac16-3975-48e6-84b4-00aa7aee70a1-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 00:20:14 crc kubenswrapper[4875]: I1126 00:20:14.131050 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-pzfzt\" (UniqueName: \"kubernetes.io/projected/a1aeac16-3975-48e6-84b4-00aa7aee70a1-kube-api-access-pzfzt\") on node \"crc\" DevicePath \"\"" Nov 26 00:20:14 crc kubenswrapper[4875]: I1126 00:20:14.356206 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931amv77c" Nov 26 00:20:14 crc kubenswrapper[4875]: I1126 00:20:14.374772 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/a1aeac16-3975-48e6-84b4-00aa7aee70a1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "a1aeac16-3975-48e6-84b4-00aa7aee70a1" (UID: "a1aeac16-3975-48e6-84b4-00aa7aee70a1"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:20:14 crc kubenswrapper[4875]: I1126 00:20:14.433178 4875 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/a1aeac16-3975-48e6-84b4-00aa7aee70a1-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 00:20:14 crc kubenswrapper[4875]: I1126 00:20:14.529052 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-cz5kn"] Nov 26 00:20:14 crc kubenswrapper[4875]: I1126 00:20:14.535105 4875 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-cz5kn"] Nov 26 00:20:14 crc kubenswrapper[4875]: I1126 00:20:14.756697 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931amv77c"] Nov 26 00:20:14 crc kubenswrapper[4875]: W1126 00:20:14.761236 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0255b072_0216_4a62_891e_dd3e301cf793.slice/crio-b475545b04796f959a67545aa8b94d7ec5226864e66017f4ddf62d7984cace0c WatchSource:0}: Error finding container b475545b04796f959a67545aa8b94d7ec5226864e66017f4ddf62d7984cace0c: Status 404 returned error can't find the container with id b475545b04796f959a67545aa8b94d7ec5226864e66017f4ddf62d7984cace0c Nov 26 00:20:14 crc kubenswrapper[4875]: I1126 00:20:14.902474 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931amv77c" event={"ID":"0255b072-0216-4a62-891e-dd3e301cf793","Type":"ContainerStarted","Data":"c5353008be33f18ad0596d6797178c98f9744684c698f53807ba6ac72d0378b3"} Nov 26 00:20:14 crc kubenswrapper[4875]: I1126 00:20:14.902543 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931amv77c" event={"ID":"0255b072-0216-4a62-891e-dd3e301cf793","Type":"ContainerStarted","Data":"b475545b04796f959a67545aa8b94d7ec5226864e66017f4ddf62d7984cace0c"} Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.213311 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-r47ph"] Nov 26 00:20:15 crc kubenswrapper[4875]: E1126 00:20:15.213630 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1aeac16-3975-48e6-84b4-00aa7aee70a1" containerName="extract-utilities" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.213654 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1aeac16-3975-48e6-84b4-00aa7aee70a1" containerName="extract-utilities" Nov 26 00:20:15 crc kubenswrapper[4875]: E1126 00:20:15.213672 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1aeac16-3975-48e6-84b4-00aa7aee70a1" containerName="registry-server" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.213679 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1aeac16-3975-48e6-84b4-00aa7aee70a1" containerName="registry-server" Nov 26 00:20:15 crc kubenswrapper[4875]: E1126 00:20:15.213689 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="a1aeac16-3975-48e6-84b4-00aa7aee70a1" containerName="extract-content" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.213696 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="a1aeac16-3975-48e6-84b4-00aa7aee70a1" containerName="extract-content" Nov 26 00:20:15 crc 
kubenswrapper[4875]: I1126 00:20:15.213827 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="a1aeac16-3975-48e6-84b4-00aa7aee70a1" containerName="registry-server" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.214270 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-r47ph" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.216666 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.217129 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-t7n6l" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.217191 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.228649 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-r47ph"] Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.331768 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7bf58fcd9b-8zdn2"] Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.332510 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bf58fcd9b-8zdn2" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.334375 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-p4v7d" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.335066 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.343713 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-9spdb\" (UniqueName: \"kubernetes.io/projected/7361ac7e-9f8b-43fd-a44b-a2090a837e24-kube-api-access-9spdb\") pod \"obo-prometheus-operator-668cf9dfbb-r47ph\" (UID: \"7361ac7e-9f8b-43fd-a44b-a2090a837e24\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-r47ph" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.343807 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7bf58fcd9b-n8czr"] Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.344551 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bf58fcd9b-n8czr" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.370164 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7bf58fcd9b-n8czr"] Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.402566 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7bf58fcd9b-8zdn2"] Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.445023 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3e9dd4ce-077d-428c-9bef-75e00aef2c77-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7bf58fcd9b-n8czr\" (UID: \"3e9dd4ce-077d-428c-9bef-75e00aef2c77\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bf58fcd9b-n8czr" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.445309 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-9spdb\" (UniqueName: \"kubernetes.io/projected/7361ac7e-9f8b-43fd-a44b-a2090a837e24-kube-api-access-9spdb\") pod \"obo-prometheus-operator-668cf9dfbb-r47ph\" (UID: \"7361ac7e-9f8b-43fd-a44b-a2090a837e24\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-r47ph" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.445708 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f3a486b4-3763-49bd-a2a5-4659c9e69635-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7bf58fcd9b-8zdn2\" (UID: \"f3a486b4-3763-49bd-a2a5-4659c9e69635\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bf58fcd9b-8zdn2" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.445858 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f3a486b4-3763-49bd-a2a5-4659c9e69635-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7bf58fcd9b-8zdn2\" (UID: \"f3a486b4-3763-49bd-a2a5-4659c9e69635\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bf58fcd9b-8zdn2" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.445994 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/3e9dd4ce-077d-428c-9bef-75e00aef2c77-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7bf58fcd9b-n8czr\" (UID: \"3e9dd4ce-077d-428c-9bef-75e00aef2c77\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bf58fcd9b-n8czr" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.479269 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-9spdb\" (UniqueName: \"kubernetes.io/projected/7361ac7e-9f8b-43fd-a44b-a2090a837e24-kube-api-access-9spdb\") pod \"obo-prometheus-operator-668cf9dfbb-r47ph\" (UID: \"7361ac7e-9f8b-43fd-a44b-a2090a837e24\") " pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-r47ph" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.486192 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-n99xk"] Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.486924 4875 util.go:30] "No sandbox 
for pod can be found. Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-n99xk" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.488895 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.497527 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-ghm7k" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.504458 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-n99xk"] Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.530735 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-r47ph" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.534662 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="a1aeac16-3975-48e6-84b4-00aa7aee70a1" path="/var/lib/kubelet/pods/a1aeac16-3975-48e6-84b4-00aa7aee70a1/volumes" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.547151 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/3e9dd4ce-077d-428c-9bef-75e00aef2c77-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7bf58fcd9b-n8czr\" (UID: \"3e9dd4ce-077d-428c-9bef-75e00aef2c77\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bf58fcd9b-n8czr" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.547208 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3e9dd4ce-077d-428c-9bef-75e00aef2c77-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7bf58fcd9b-n8czr\" (UID: \"3e9dd4ce-077d-428c-9bef-75e00aef2c77\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bf58fcd9b-n8czr" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.547249 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f3a486b4-3763-49bd-a2a5-4659c9e69635-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7bf58fcd9b-8zdn2\" (UID: \"f3a486b4-3763-49bd-a2a5-4659c9e69635\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bf58fcd9b-8zdn2" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.547285 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f3a486b4-3763-49bd-a2a5-4659c9e69635-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7bf58fcd9b-8zdn2\" (UID: \"f3a486b4-3763-49bd-a2a5-4659c9e69635\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bf58fcd9b-8zdn2" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.550517 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/3e9dd4ce-077d-428c-9bef-75e00aef2c77-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7bf58fcd9b-n8czr\" (UID: \"3e9dd4ce-077d-428c-9bef-75e00aef2c77\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bf58fcd9b-n8czr" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.550527 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" 
(UniqueName: \"kubernetes.io/secret/3e9dd4ce-077d-428c-9bef-75e00aef2c77-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7bf58fcd9b-n8czr\" (UID: \"3e9dd4ce-077d-428c-9bef-75e00aef2c77\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bf58fcd9b-n8czr" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.550797 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/f3a486b4-3763-49bd-a2a5-4659c9e69635-apiservice-cert\") pod \"obo-prometheus-operator-admission-webhook-7bf58fcd9b-8zdn2\" (UID: \"f3a486b4-3763-49bd-a2a5-4659c9e69635\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bf58fcd9b-8zdn2" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.552839 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/f3a486b4-3763-49bd-a2a5-4659c9e69635-webhook-cert\") pod \"obo-prometheus-operator-admission-webhook-7bf58fcd9b-8zdn2\" (UID: \"f3a486b4-3763-49bd-a2a5-4659c9e69635\") " pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bf58fcd9b-8zdn2" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.630264 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operators/perses-operator-5446b9c989-b4svl"] Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.631125 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-b4svl" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.632637 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-hzg29" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.648201 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fnjjj\" (UniqueName: \"kubernetes.io/projected/4a6616b3-09c5-43c2-92a5-f01a09ab6c44-kube-api-access-fnjjj\") pod \"observability-operator-d8bb48f5d-n99xk\" (UID: \"4a6616b3-09c5-43c2-92a5-f01a09ab6c44\") " pod="openshift-operators/observability-operator-d8bb48f5d-n99xk" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.648265 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/4a6616b3-09c5-43c2-92a5-f01a09ab6c44-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-n99xk\" (UID: \"4a6616b3-09c5-43c2-92a5-f01a09ab6c44\") " pod="openshift-operators/observability-operator-d8bb48f5d-n99xk" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.654406 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bf58fcd9b-8zdn2" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.659207 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-b4svl"] Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.661867 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bf58fcd9b-n8czr" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.754283 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-668cf9dfbb-r47ph"] Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.757613 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b82fw\" (UniqueName: \"kubernetes.io/projected/9ce8401b-600e-424e-accb-534de2c4ae52-kube-api-access-b82fw\") pod \"perses-operator-5446b9c989-b4svl\" (UID: \"9ce8401b-600e-424e-accb-534de2c4ae52\") " pod="openshift-operators/perses-operator-5446b9c989-b4svl" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.757693 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/4a6616b3-09c5-43c2-92a5-f01a09ab6c44-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-n99xk\" (UID: \"4a6616b3-09c5-43c2-92a5-f01a09ab6c44\") " pod="openshift-operators/observability-operator-d8bb48f5d-n99xk" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.757774 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/9ce8401b-600e-424e-accb-534de2c4ae52-openshift-service-ca\") pod \"perses-operator-5446b9c989-b4svl\" (UID: \"9ce8401b-600e-424e-accb-534de2c4ae52\") " pod="openshift-operators/perses-operator-5446b9c989-b4svl" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.757846 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fnjjj\" (UniqueName: \"kubernetes.io/projected/4a6616b3-09c5-43c2-92a5-f01a09ab6c44-kube-api-access-fnjjj\") pod \"observability-operator-d8bb48f5d-n99xk\" (UID: \"4a6616b3-09c5-43c2-92a5-f01a09ab6c44\") " pod="openshift-operators/observability-operator-d8bb48f5d-n99xk" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.765182 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"observability-operator-tls\" (UniqueName: \"kubernetes.io/secret/4a6616b3-09c5-43c2-92a5-f01a09ab6c44-observability-operator-tls\") pod \"observability-operator-d8bb48f5d-n99xk\" (UID: \"4a6616b3-09c5-43c2-92a5-f01a09ab6c44\") " pod="openshift-operators/observability-operator-d8bb48f5d-n99xk" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.779327 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fnjjj\" (UniqueName: \"kubernetes.io/projected/4a6616b3-09c5-43c2-92a5-f01a09ab6c44-kube-api-access-fnjjj\") pod \"observability-operator-d8bb48f5d-n99xk\" (UID: \"4a6616b3-09c5-43c2-92a5-f01a09ab6c44\") " pod="openshift-operators/observability-operator-d8bb48f5d-n99xk" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.799786 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/observability-operator-d8bb48f5d-n99xk" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.859022 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/9ce8401b-600e-424e-accb-534de2c4ae52-openshift-service-ca\") pod \"perses-operator-5446b9c989-b4svl\" (UID: \"9ce8401b-600e-424e-accb-534de2c4ae52\") " pod="openshift-operators/perses-operator-5446b9c989-b4svl" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.859470 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b82fw\" (UniqueName: \"kubernetes.io/projected/9ce8401b-600e-424e-accb-534de2c4ae52-kube-api-access-b82fw\") pod \"perses-operator-5446b9c989-b4svl\" (UID: \"9ce8401b-600e-424e-accb-534de2c4ae52\") " pod="openshift-operators/perses-operator-5446b9c989-b4svl" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.862626 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"openshift-service-ca\" (UniqueName: \"kubernetes.io/configmap/9ce8401b-600e-424e-accb-534de2c4ae52-openshift-service-ca\") pod \"perses-operator-5446b9c989-b4svl\" (UID: \"9ce8401b-600e-424e-accb-534de2c4ae52\") " pod="openshift-operators/perses-operator-5446b9c989-b4svl" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.883196 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b82fw\" (UniqueName: \"kubernetes.io/projected/9ce8401b-600e-424e-accb-534de2c4ae52-kube-api-access-b82fw\") pod \"perses-operator-5446b9c989-b4svl\" (UID: \"9ce8401b-600e-424e-accb-534de2c4ae52\") " pod="openshift-operators/perses-operator-5446b9c989-b4svl" Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.917177 4875 generic.go:334] "Generic (PLEG): container finished" podID="0255b072-0216-4a62-891e-dd3e301cf793" containerID="c5353008be33f18ad0596d6797178c98f9744684c698f53807ba6ac72d0378b3" exitCode=0 Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.917640 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931amv77c" event={"ID":"0255b072-0216-4a62-891e-dd3e301cf793","Type":"ContainerDied","Data":"c5353008be33f18ad0596d6797178c98f9744684c698f53807ba6ac72d0378b3"} Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.920977 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-r47ph" event={"ID":"7361ac7e-9f8b-43fd-a44b-a2090a837e24","Type":"ContainerStarted","Data":"3ee3be268fedcab47a3a78fe7843a981ca2a48379f17c496b580481fb16c91bc"} Nov 26 00:20:15 crc kubenswrapper[4875]: I1126 00:20:15.948704 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operators/perses-operator-5446b9c989-b4svl" Nov 26 00:20:16 crc kubenswrapper[4875]: I1126 00:20:16.117122 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/observability-operator-d8bb48f5d-n99xk"] Nov 26 00:20:16 crc kubenswrapper[4875]: W1126 00:20:16.125626 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod4a6616b3_09c5_43c2_92a5_f01a09ab6c44.slice/crio-b2c27eeed5e4f6c8ff91f975b9ed5e3af3cccf523b1e3e350b914e148052c664 WatchSource:0}: Error finding container b2c27eeed5e4f6c8ff91f975b9ed5e3af3cccf523b1e3e350b914e148052c664: Status 404 returned error can't find the container with id b2c27eeed5e4f6c8ff91f975b9ed5e3af3cccf523b1e3e350b914e148052c664 Nov 26 00:20:16 crc kubenswrapper[4875]: I1126 00:20:16.161922 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7bf58fcd9b-n8czr"] Nov 26 00:20:16 crc kubenswrapper[4875]: W1126 00:20:16.176588 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod3e9dd4ce_077d_428c_9bef_75e00aef2c77.slice/crio-8842f8b024d972b0ea4b8ad91dcb6bbc3dbaf0471d59302ed371d1a25b7f3cb8 WatchSource:0}: Error finding container 8842f8b024d972b0ea4b8ad91dcb6bbc3dbaf0471d59302ed371d1a25b7f3cb8: Status 404 returned error can't find the container with id 8842f8b024d972b0ea4b8ad91dcb6bbc3dbaf0471d59302ed371d1a25b7f3cb8 Nov 26 00:20:16 crc kubenswrapper[4875]: I1126 00:20:16.205703 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/obo-prometheus-operator-admission-webhook-7bf58fcd9b-8zdn2"] Nov 26 00:20:16 crc kubenswrapper[4875]: I1126 00:20:16.220856 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operators/perses-operator-5446b9c989-b4svl"] Nov 26 00:20:16 crc kubenswrapper[4875]: W1126 00:20:16.264039 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod9ce8401b_600e_424e_accb_534de2c4ae52.slice/crio-4f49c0d546b046a60d40c7344516bed63ab1cea762f55680f87870abbd621c84 WatchSource:0}: Error finding container 4f49c0d546b046a60d40c7344516bed63ab1cea762f55680f87870abbd621c84: Status 404 returned error can't find the container with id 4f49c0d546b046a60d40c7344516bed63ab1cea762f55680f87870abbd621c84 Nov 26 00:20:16 crc kubenswrapper[4875]: I1126 00:20:16.930368 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bf58fcd9b-n8czr" event={"ID":"3e9dd4ce-077d-428c-9bef-75e00aef2c77","Type":"ContainerStarted","Data":"8842f8b024d972b0ea4b8ad91dcb6bbc3dbaf0471d59302ed371d1a25b7f3cb8"} Nov 26 00:20:16 crc kubenswrapper[4875]: I1126 00:20:16.931683 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5446b9c989-b4svl" event={"ID":"9ce8401b-600e-424e-accb-534de2c4ae52","Type":"ContainerStarted","Data":"4f49c0d546b046a60d40c7344516bed63ab1cea762f55680f87870abbd621c84"} Nov 26 00:20:16 crc kubenswrapper[4875]: I1126 00:20:16.932953 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-n99xk" event={"ID":"4a6616b3-09c5-43c2-92a5-f01a09ab6c44","Type":"ContainerStarted","Data":"b2c27eeed5e4f6c8ff91f975b9ed5e3af3cccf523b1e3e350b914e148052c664"} Nov 26 00:20:16 crc kubenswrapper[4875]: I1126 00:20:16.934278 
4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bf58fcd9b-8zdn2" event={"ID":"f3a486b4-3763-49bd-a2a5-4659c9e69635","Type":"ContainerStarted","Data":"e8806a6eb3427097cdcf52f4cd985b8be5d4aa21425a8954a49685e2b2668ff7"} Nov 26 00:20:20 crc kubenswrapper[4875]: I1126 00:20:20.044969 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-ptghh" Nov 26 00:20:20 crc kubenswrapper[4875]: I1126 00:20:20.045037 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-ptghh" Nov 26 00:20:20 crc kubenswrapper[4875]: I1126 00:20:20.093123 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-ptghh" Nov 26 00:20:21 crc kubenswrapper[4875]: I1126 00:20:21.017092 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-ptghh" Nov 26 00:20:21 crc kubenswrapper[4875]: I1126 00:20:21.878516 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ptghh"] Nov 26 00:20:22 crc kubenswrapper[4875]: I1126 00:20:22.319107 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["service-telemetry/elastic-operator-bf7b77755-xjqrk"] Nov 26 00:20:22 crc kubenswrapper[4875]: I1126 00:20:22.319946 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="service-telemetry/elastic-operator-bf7b77755-xjqrk" Nov 26 00:20:22 crc kubenswrapper[4875]: I1126 00:20:22.322397 4875 reflector.go:368] Caches populated for *v1.Secret from object-"service-telemetry"/"elastic-operator-service-cert" Nov 26 00:20:22 crc kubenswrapper[4875]: I1126 00:20:22.322648 4875 reflector.go:368] Caches populated for *v1.Secret from object-"service-telemetry"/"elastic-operator-dockercfg-48pn2" Nov 26 00:20:22 crc kubenswrapper[4875]: I1126 00:20:22.323324 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"service-telemetry"/"kube-root-ca.crt" Nov 26 00:20:22 crc kubenswrapper[4875]: I1126 00:20:22.324011 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"service-telemetry"/"openshift-service-ca.crt" Nov 26 00:20:22 crc kubenswrapper[4875]: I1126 00:20:22.336820 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["service-telemetry/elastic-operator-bf7b77755-xjqrk"] Nov 26 00:20:22 crc kubenswrapper[4875]: I1126 00:20:22.460724 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/5fdc5235-fab2-4d96-8526-1ca2270a92ac-webhook-cert\") pod \"elastic-operator-bf7b77755-xjqrk\" (UID: \"5fdc5235-fab2-4d96-8526-1ca2270a92ac\") " pod="service-telemetry/elastic-operator-bf7b77755-xjqrk" Nov 26 00:20:22 crc kubenswrapper[4875]: I1126 00:20:22.460802 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/5fdc5235-fab2-4d96-8526-1ca2270a92ac-apiservice-cert\") pod \"elastic-operator-bf7b77755-xjqrk\" (UID: \"5fdc5235-fab2-4d96-8526-1ca2270a92ac\") " pod="service-telemetry/elastic-operator-bf7b77755-xjqrk" Nov 26 00:20:22 crc kubenswrapper[4875]: I1126 00:20:22.460920 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lxnc7\" (UniqueName: 
\"kubernetes.io/projected/5fdc5235-fab2-4d96-8526-1ca2270a92ac-kube-api-access-lxnc7\") pod \"elastic-operator-bf7b77755-xjqrk\" (UID: \"5fdc5235-fab2-4d96-8526-1ca2270a92ac\") " pod="service-telemetry/elastic-operator-bf7b77755-xjqrk" Nov 26 00:20:22 crc kubenswrapper[4875]: I1126 00:20:22.561577 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/5fdc5235-fab2-4d96-8526-1ca2270a92ac-apiservice-cert\") pod \"elastic-operator-bf7b77755-xjqrk\" (UID: \"5fdc5235-fab2-4d96-8526-1ca2270a92ac\") " pod="service-telemetry/elastic-operator-bf7b77755-xjqrk" Nov 26 00:20:22 crc kubenswrapper[4875]: I1126 00:20:22.561651 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lxnc7\" (UniqueName: \"kubernetes.io/projected/5fdc5235-fab2-4d96-8526-1ca2270a92ac-kube-api-access-lxnc7\") pod \"elastic-operator-bf7b77755-xjqrk\" (UID: \"5fdc5235-fab2-4d96-8526-1ca2270a92ac\") " pod="service-telemetry/elastic-operator-bf7b77755-xjqrk" Nov 26 00:20:22 crc kubenswrapper[4875]: I1126 00:20:22.561679 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/5fdc5235-fab2-4d96-8526-1ca2270a92ac-webhook-cert\") pod \"elastic-operator-bf7b77755-xjqrk\" (UID: \"5fdc5235-fab2-4d96-8526-1ca2270a92ac\") " pod="service-telemetry/elastic-operator-bf7b77755-xjqrk" Nov 26 00:20:22 crc kubenswrapper[4875]: I1126 00:20:22.571325 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"webhook-cert\" (UniqueName: \"kubernetes.io/secret/5fdc5235-fab2-4d96-8526-1ca2270a92ac-webhook-cert\") pod \"elastic-operator-bf7b77755-xjqrk\" (UID: \"5fdc5235-fab2-4d96-8526-1ca2270a92ac\") " pod="service-telemetry/elastic-operator-bf7b77755-xjqrk" Nov 26 00:20:22 crc kubenswrapper[4875]: I1126 00:20:22.583013 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"apiservice-cert\" (UniqueName: \"kubernetes.io/secret/5fdc5235-fab2-4d96-8526-1ca2270a92ac-apiservice-cert\") pod \"elastic-operator-bf7b77755-xjqrk\" (UID: \"5fdc5235-fab2-4d96-8526-1ca2270a92ac\") " pod="service-telemetry/elastic-operator-bf7b77755-xjqrk" Nov 26 00:20:22 crc kubenswrapper[4875]: I1126 00:20:22.609136 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-lxnc7\" (UniqueName: \"kubernetes.io/projected/5fdc5235-fab2-4d96-8526-1ca2270a92ac-kube-api-access-lxnc7\") pod \"elastic-operator-bf7b77755-xjqrk\" (UID: \"5fdc5235-fab2-4d96-8526-1ca2270a92ac\") " pod="service-telemetry/elastic-operator-bf7b77755-xjqrk" Nov 26 00:20:22 crc kubenswrapper[4875]: I1126 00:20:22.638427 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="service-telemetry/elastic-operator-bf7b77755-xjqrk" Nov 26 00:20:22 crc kubenswrapper[4875]: I1126 00:20:22.976567 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-ptghh" podUID="0cdb1c9a-f742-4e18-9e17-3bed2687ff54" containerName="registry-server" containerID="cri-o://32ecf1684834de5de51bd29abf4d72600f538d35c581477344b3661d184c40a7" gracePeriod=2 Nov 26 00:20:23 crc kubenswrapper[4875]: I1126 00:20:23.984946 4875 generic.go:334] "Generic (PLEG): container finished" podID="0cdb1c9a-f742-4e18-9e17-3bed2687ff54" containerID="32ecf1684834de5de51bd29abf4d72600f538d35c581477344b3661d184c40a7" exitCode=0 Nov 26 00:20:23 crc kubenswrapper[4875]: I1126 00:20:23.985035 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ptghh" event={"ID":"0cdb1c9a-f742-4e18-9e17-3bed2687ff54","Type":"ContainerDied","Data":"32ecf1684834de5de51bd29abf4d72600f538d35c581477344b3661d184c40a7"} Nov 26 00:20:26 crc kubenswrapper[4875]: I1126 00:20:26.533725 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["service-telemetry/interconnect-operator-5bb49f789d-b22zm"] Nov 26 00:20:26 crc kubenswrapper[4875]: I1126 00:20:26.535891 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="service-telemetry/interconnect-operator-5bb49f789d-b22zm" Nov 26 00:20:26 crc kubenswrapper[4875]: I1126 00:20:26.541114 4875 reflector.go:368] Caches populated for *v1.Secret from object-"service-telemetry"/"interconnect-operator-dockercfg-4b8fv" Nov 26 00:20:26 crc kubenswrapper[4875]: I1126 00:20:26.544888 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["service-telemetry/interconnect-operator-5bb49f789d-b22zm"] Nov 26 00:20:26 crc kubenswrapper[4875]: I1126 00:20:26.612072 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mfdbn\" (UniqueName: \"kubernetes.io/projected/2ca17085-9ed1-4682-be93-56e571db4862-kube-api-access-mfdbn\") pod \"interconnect-operator-5bb49f789d-b22zm\" (UID: \"2ca17085-9ed1-4682-be93-56e571db4862\") " pod="service-telemetry/interconnect-operator-5bb49f789d-b22zm" Nov 26 00:20:26 crc kubenswrapper[4875]: I1126 00:20:26.712857 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mfdbn\" (UniqueName: \"kubernetes.io/projected/2ca17085-9ed1-4682-be93-56e571db4862-kube-api-access-mfdbn\") pod \"interconnect-operator-5bb49f789d-b22zm\" (UID: \"2ca17085-9ed1-4682-be93-56e571db4862\") " pod="service-telemetry/interconnect-operator-5bb49f789d-b22zm" Nov 26 00:20:26 crc kubenswrapper[4875]: I1126 00:20:26.737472 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mfdbn\" (UniqueName: \"kubernetes.io/projected/2ca17085-9ed1-4682-be93-56e571db4862-kube-api-access-mfdbn\") pod \"interconnect-operator-5bb49f789d-b22zm\" (UID: \"2ca17085-9ed1-4682-be93-56e571db4862\") " pod="service-telemetry/interconnect-operator-5bb49f789d-b22zm" Nov 26 00:20:26 crc kubenswrapper[4875]: I1126 00:20:26.885140 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="service-telemetry/interconnect-operator-5bb49f789d-b22zm" Nov 26 00:20:27 crc kubenswrapper[4875]: I1126 00:20:27.890342 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-ptghh" Nov 26 00:20:28 crc kubenswrapper[4875]: I1126 00:20:28.005712 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-ptghh" event={"ID":"0cdb1c9a-f742-4e18-9e17-3bed2687ff54","Type":"ContainerDied","Data":"9e85e825c175f1c7cfdbad6eed46d4a1eb363ecdb6ddc986f70275cbdf67ffee"} Nov 26 00:20:28 crc kubenswrapper[4875]: I1126 00:20:28.005762 4875 scope.go:117] "RemoveContainer" containerID="32ecf1684834de5de51bd29abf4d72600f538d35c581477344b3661d184c40a7" Nov 26 00:20:28 crc kubenswrapper[4875]: I1126 00:20:28.005885 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-ptghh" Nov 26 00:20:28 crc kubenswrapper[4875]: I1126 00:20:28.028481 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0cdb1c9a-f742-4e18-9e17-3bed2687ff54-utilities\") pod \"0cdb1c9a-f742-4e18-9e17-3bed2687ff54\" (UID: \"0cdb1c9a-f742-4e18-9e17-3bed2687ff54\") " Nov 26 00:20:28 crc kubenswrapper[4875]: I1126 00:20:28.028583 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0cdb1c9a-f742-4e18-9e17-3bed2687ff54-catalog-content\") pod \"0cdb1c9a-f742-4e18-9e17-3bed2687ff54\" (UID: \"0cdb1c9a-f742-4e18-9e17-3bed2687ff54\") " Nov 26 00:20:28 crc kubenswrapper[4875]: I1126 00:20:28.028728 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-77c5t\" (UniqueName: \"kubernetes.io/projected/0cdb1c9a-f742-4e18-9e17-3bed2687ff54-kube-api-access-77c5t\") pod \"0cdb1c9a-f742-4e18-9e17-3bed2687ff54\" (UID: \"0cdb1c9a-f742-4e18-9e17-3bed2687ff54\") " Nov 26 00:20:28 crc kubenswrapper[4875]: I1126 00:20:28.029889 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0cdb1c9a-f742-4e18-9e17-3bed2687ff54-utilities" (OuterVolumeSpecName: "utilities") pod "0cdb1c9a-f742-4e18-9e17-3bed2687ff54" (UID: "0cdb1c9a-f742-4e18-9e17-3bed2687ff54"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:20:28 crc kubenswrapper[4875]: I1126 00:20:28.035648 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0cdb1c9a-f742-4e18-9e17-3bed2687ff54-kube-api-access-77c5t" (OuterVolumeSpecName: "kube-api-access-77c5t") pod "0cdb1c9a-f742-4e18-9e17-3bed2687ff54" (UID: "0cdb1c9a-f742-4e18-9e17-3bed2687ff54"). InnerVolumeSpecName "kube-api-access-77c5t". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:20:28 crc kubenswrapper[4875]: I1126 00:20:28.100528 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0cdb1c9a-f742-4e18-9e17-3bed2687ff54-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "0cdb1c9a-f742-4e18-9e17-3bed2687ff54" (UID: "0cdb1c9a-f742-4e18-9e17-3bed2687ff54"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:20:28 crc kubenswrapper[4875]: I1126 00:20:28.130731 4875 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/0cdb1c9a-f742-4e18-9e17-3bed2687ff54-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 00:20:28 crc kubenswrapper[4875]: I1126 00:20:28.130765 4875 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/0cdb1c9a-f742-4e18-9e17-3bed2687ff54-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 00:20:28 crc kubenswrapper[4875]: I1126 00:20:28.130779 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-77c5t\" (UniqueName: \"kubernetes.io/projected/0cdb1c9a-f742-4e18-9e17-3bed2687ff54-kube-api-access-77c5t\") on node \"crc\" DevicePath \"\"" Nov 26 00:20:28 crc kubenswrapper[4875]: I1126 00:20:28.333974 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-ptghh"] Nov 26 00:20:28 crc kubenswrapper[4875]: I1126 00:20:28.337312 4875 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-ptghh"] Nov 26 00:20:29 crc kubenswrapper[4875]: I1126 00:20:29.542625 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="0cdb1c9a-f742-4e18-9e17-3bed2687ff54" path="/var/lib/kubelet/pods/0cdb1c9a-f742-4e18-9e17-3bed2687ff54/volumes" Nov 26 00:20:31 crc kubenswrapper[4875]: E1126 00:20:31.422352 4875 log.go:32] "PullImage from image service failed" err="rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" image="registry.redhat.io/cluster-observability-operator/obo-prometheus-rhel9-operator@sha256:203cf5b9dc1460f09e75f58d8b5cf7df5e57c18c8c6a41c14b5e8977d83263f3" Nov 26 00:20:31 crc kubenswrapper[4875]: E1126 00:20:31.422540 4875 kuberuntime_manager.go:1274] "Unhandled Error" err="container &Container{Name:prometheus-operator,Image:registry.redhat.io/cluster-observability-operator/obo-prometheus-rhel9-operator@sha256:203cf5b9dc1460f09e75f58d8b5cf7df5e57c18c8c6a41c14b5e8977d83263f3,Command:[],Args:[--prometheus-config-reloader=$(RELATED_IMAGE_PROMETHEUS_CONFIG_RELOADER) --prometheus-instance-selector=app.kubernetes.io/managed-by=observability-operator --alertmanager-instance-selector=app.kubernetes.io/managed-by=observability-operator --thanos-ruler-instance-selector=app.kubernetes.io/managed-by=observability-operator],WorkingDir:,Ports:[]ContainerPort{ContainerPort{Name:http,HostPort:0,ContainerPort:8080,Protocol:TCP,HostIP:,},},Env:[]EnvVar{EnvVar{Name:GOGC,Value:30,ValueFrom:nil,},EnvVar{Name:RELATED_IMAGE_PROMETHEUS_CONFIG_RELOADER,Value:registry.redhat.io/cluster-observability-operator/obo-prometheus-operator-prometheus-config-reloader-rhel9@sha256:1133c973c7472c665f910a722e19c8e2e27accb34b90fab67f14548627ce9c62,ValueFrom:nil,},EnvVar{Name:OPERATOR_CONDITION_NAME,Value:cluster-observability-operator.v1.3.0,ValueFrom:nil,},},Resources:ResourceRequirements{Limits:ResourceList{cpu: {{100 -3} {} 100m DecimalSI},memory: {{524288000 0} {} 500Mi BinarySI},},Requests:ResourceList{cpu: {{5 -3} {} 5m DecimalSI},memory: {{157286400 0} {} 150Mi 
BinarySI},},Claims:[]ResourceClaim{},},VolumeMounts:[]VolumeMount{VolumeMount{Name:kube-api-access-9spdb,ReadOnly:true,MountPath:/var/run/secrets/kubernetes.io/serviceaccount,SubPath:,MountPropagation:nil,SubPathExpr:,RecursiveReadOnly:nil,},},LivenessProbe:nil,ReadinessProbe:nil,Lifecycle:nil,TerminationMessagePath:/dev/termination-log,ImagePullPolicy:IfNotPresent,SecurityContext:&SecurityContext{Capabilities:&Capabilities{Add:[],Drop:[ALL],},Privileged:nil,SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,ReadOnlyRootFilesystem:*true,AllowPrivilegeEscalation:*false,RunAsGroup:nil,ProcMount:nil,WindowsOptions:nil,SeccompProfile:nil,AppArmorProfile:nil,},Stdin:false,StdinOnce:false,TTY:false,EnvFrom:[]EnvFromSource{},TerminationMessagePolicy:FallbackToLogsOnError,VolumeDevices:[]VolumeDevice{},StartupProbe:nil,ResizePolicy:[]ContainerResizePolicy{},RestartPolicy:nil,} start failed in pod obo-prometheus-operator-668cf9dfbb-r47ph_openshift-operators(7361ac7e-9f8b-43fd-a44b-a2090a837e24): ErrImagePull: rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled" logger="UnhandledError" Nov 26 00:20:31 crc kubenswrapper[4875]: E1126 00:20:31.423732 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"prometheus-operator\" with ErrImagePull: \"rpc error: code = Canceled desc = copying system image from manifest list: copying config: context canceled\"" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-r47ph" podUID="7361ac7e-9f8b-43fd-a44b-a2090a837e24" Nov 26 00:20:31 crc kubenswrapper[4875]: I1126 00:20:31.468400 4875 scope.go:117] "RemoveContainer" containerID="fd0d75a072e5c664a2cb71353bcaec9ac21de36d5fbdb70609c449d02ccff059" Nov 26 00:20:31 crc kubenswrapper[4875]: I1126 00:20:31.572276 4875 scope.go:117] "RemoveContainer" containerID="33eec501934da5fd333f24bbbf4a37541f6a15a05e0136fd375845162a8f1e5a" Nov 26 00:20:31 crc kubenswrapper[4875]: I1126 00:20:31.801642 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["service-telemetry/elastic-operator-bf7b77755-xjqrk"] Nov 26 00:20:31 crc kubenswrapper[4875]: I1126 00:20:31.845894 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["service-telemetry/interconnect-operator-5bb49f789d-b22zm"] Nov 26 00:20:32 crc kubenswrapper[4875]: I1126 00:20:32.028100 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/perses-operator-5446b9c989-b4svl" event={"ID":"9ce8401b-600e-424e-accb-534de2c4ae52","Type":"ContainerStarted","Data":"bc3c51a2d7c9aa83878e2e232543e46f7623f08ac2a4f7b478776069dd67f491"} Nov 26 00:20:32 crc kubenswrapper[4875]: I1126 00:20:32.028236 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/perses-operator-5446b9c989-b4svl" Nov 26 00:20:32 crc kubenswrapper[4875]: I1126 00:20:32.029648 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/observability-operator-d8bb48f5d-n99xk" event={"ID":"4a6616b3-09c5-43c2-92a5-f01a09ab6c44","Type":"ContainerStarted","Data":"214cede802597b44976ab182f1aa0afdaf171b0bfd8d400b812049d8db6ba5ef"} Nov 26 00:20:32 crc kubenswrapper[4875]: I1126 00:20:32.030656 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/elastic-operator-bf7b77755-xjqrk" event={"ID":"5fdc5235-fab2-4d96-8526-1ca2270a92ac","Type":"ContainerStarted","Data":"343c2360d81aa12f896b0aa7bde5995a7e9f68b60cf62829af1ad956bbb2d4a0"} Nov 26 00:20:32 crc kubenswrapper[4875]: I1126 00:20:32.032731 4875 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/interconnect-operator-5bb49f789d-b22zm" event={"ID":"2ca17085-9ed1-4682-be93-56e571db4862","Type":"ContainerStarted","Data":"90ad729437d64964632dd20acd5491a30ba3c0e4e61e67fcfdd70fd53290a7e9"} Nov 26 00:20:32 crc kubenswrapper[4875]: I1126 00:20:32.040742 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931amv77c" event={"ID":"0255b072-0216-4a62-891e-dd3e301cf793","Type":"ContainerStarted","Data":"b1215ef9ea9b52ca4fa58ff922091f9e984ec16ae773e70cddfb49cf10d0188d"} Nov 26 00:20:32 crc kubenswrapper[4875]: E1126 00:20:32.051050 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"prometheus-operator\" with ImagePullBackOff: \"Back-off pulling image \\\"registry.redhat.io/cluster-observability-operator/obo-prometheus-rhel9-operator@sha256:203cf5b9dc1460f09e75f58d8b5cf7df5e57c18c8c6a41c14b5e8977d83263f3\\\"\"" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-r47ph" podUID="7361ac7e-9f8b-43fd-a44b-a2090a837e24" Nov 26 00:20:32 crc kubenswrapper[4875]: I1126 00:20:32.060478 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/perses-operator-5446b9c989-b4svl" podStartSLOduration=1.835873158 podStartE2EDuration="17.060460175s" podCreationTimestamp="2025-11-26 00:20:15 +0000 UTC" firstStartedPulling="2025-11-26 00:20:16.266562215 +0000 UTC m=+789.456537910" lastFinishedPulling="2025-11-26 00:20:31.491149232 +0000 UTC m=+804.681124927" observedRunningTime="2025-11-26 00:20:32.054314577 +0000 UTC m=+805.244290262" watchObservedRunningTime="2025-11-26 00:20:32.060460175 +0000 UTC m=+805.250435870" Nov 26 00:20:33 crc kubenswrapper[4875]: I1126 00:20:33.055915 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bf58fcd9b-8zdn2" event={"ID":"f3a486b4-3763-49bd-a2a5-4659c9e69635","Type":"ContainerStarted","Data":"b5584620ef3228bcd7ebec80a5aa51aa37f2343c66bd0a4596a7b15737c505ad"} Nov 26 00:20:33 crc kubenswrapper[4875]: I1126 00:20:33.058705 4875 generic.go:334] "Generic (PLEG): container finished" podID="0255b072-0216-4a62-891e-dd3e301cf793" containerID="b1215ef9ea9b52ca4fa58ff922091f9e984ec16ae773e70cddfb49cf10d0188d" exitCode=0 Nov 26 00:20:33 crc kubenswrapper[4875]: I1126 00:20:33.058779 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931amv77c" event={"ID":"0255b072-0216-4a62-891e-dd3e301cf793","Type":"ContainerDied","Data":"b1215ef9ea9b52ca4fa58ff922091f9e984ec16ae773e70cddfb49cf10d0188d"} Nov 26 00:20:33 crc kubenswrapper[4875]: I1126 00:20:33.060364 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bf58fcd9b-n8czr" event={"ID":"3e9dd4ce-077d-428c-9bef-75e00aef2c77","Type":"ContainerStarted","Data":"e29ba70a70b9d6e4e19d4cff8d9c92d900b704b1f5483685b10f899f06ad0477"} Nov 26 00:20:33 crc kubenswrapper[4875]: I1126 00:20:33.078234 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bf58fcd9b-8zdn2" podStartSLOduration=2.812427501 podStartE2EDuration="18.078216009s" podCreationTimestamp="2025-11-26 00:20:15 +0000 UTC" firstStartedPulling="2025-11-26 00:20:16.226723801 +0000 UTC m=+789.416699496" lastFinishedPulling="2025-11-26 
00:20:31.492512319 +0000 UTC m=+804.682488004" observedRunningTime="2025-11-26 00:20:33.07455278 +0000 UTC m=+806.264528475" watchObservedRunningTime="2025-11-26 00:20:33.078216009 +0000 UTC m=+806.268191704" Nov 26 00:20:33 crc kubenswrapper[4875]: I1126 00:20:33.098712 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-admission-webhook-7bf58fcd9b-n8czr" podStartSLOduration=2.764624428 podStartE2EDuration="18.098690266s" podCreationTimestamp="2025-11-26 00:20:15 +0000 UTC" firstStartedPulling="2025-11-26 00:20:16.183293048 +0000 UTC m=+789.373268743" lastFinishedPulling="2025-11-26 00:20:31.517358886 +0000 UTC m=+804.707334581" observedRunningTime="2025-11-26 00:20:33.096040294 +0000 UTC m=+806.286015989" watchObservedRunningTime="2025-11-26 00:20:33.098690266 +0000 UTC m=+806.288665961" Nov 26 00:20:33 crc kubenswrapper[4875]: I1126 00:20:33.139114 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/observability-operator-d8bb48f5d-n99xk" podStartSLOduration=2.707682718 podStartE2EDuration="18.139092947s" podCreationTimestamp="2025-11-26 00:20:15 +0000 UTC" firstStartedPulling="2025-11-26 00:20:16.129060141 +0000 UTC m=+789.319035826" lastFinishedPulling="2025-11-26 00:20:31.56047036 +0000 UTC m=+804.750446055" observedRunningTime="2025-11-26 00:20:33.136737893 +0000 UTC m=+806.326713588" watchObservedRunningTime="2025-11-26 00:20:33.139092947 +0000 UTC m=+806.329068652" Nov 26 00:20:34 crc kubenswrapper[4875]: I1126 00:20:34.075025 4875 generic.go:334] "Generic (PLEG): container finished" podID="0255b072-0216-4a62-891e-dd3e301cf793" containerID="b24c6763e3f89f6c95b965ad4e73dcc25ece0efdfb186eb75b39cc7442032006" exitCode=0 Nov 26 00:20:34 crc kubenswrapper[4875]: I1126 00:20:34.075113 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931amv77c" event={"ID":"0255b072-0216-4a62-891e-dd3e301cf793","Type":"ContainerDied","Data":"b24c6763e3f89f6c95b965ad4e73dcc25ece0efdfb186eb75b39cc7442032006"} Nov 26 00:20:35 crc kubenswrapper[4875]: I1126 00:20:35.800795 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-operators/observability-operator-d8bb48f5d-n99xk" Nov 26 00:20:35 crc kubenswrapper[4875]: I1126 00:20:35.802653 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/observability-operator-d8bb48f5d-n99xk" Nov 26 00:20:36 crc kubenswrapper[4875]: I1126 00:20:36.249725 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931amv77c" Nov 26 00:20:36 crc kubenswrapper[4875]: I1126 00:20:36.369621 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-slv96\" (UniqueName: \"kubernetes.io/projected/0255b072-0216-4a62-891e-dd3e301cf793-kube-api-access-slv96\") pod \"0255b072-0216-4a62-891e-dd3e301cf793\" (UID: \"0255b072-0216-4a62-891e-dd3e301cf793\") " Nov 26 00:20:36 crc kubenswrapper[4875]: I1126 00:20:36.370080 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0255b072-0216-4a62-891e-dd3e301cf793-bundle\") pod \"0255b072-0216-4a62-891e-dd3e301cf793\" (UID: \"0255b072-0216-4a62-891e-dd3e301cf793\") " Nov 26 00:20:36 crc kubenswrapper[4875]: I1126 00:20:36.370105 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0255b072-0216-4a62-891e-dd3e301cf793-util\") pod \"0255b072-0216-4a62-891e-dd3e301cf793\" (UID: \"0255b072-0216-4a62-891e-dd3e301cf793\") " Nov 26 00:20:36 crc kubenswrapper[4875]: I1126 00:20:36.371235 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0255b072-0216-4a62-891e-dd3e301cf793-bundle" (OuterVolumeSpecName: "bundle") pod "0255b072-0216-4a62-891e-dd3e301cf793" (UID: "0255b072-0216-4a62-891e-dd3e301cf793"). InnerVolumeSpecName "bundle". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:20:36 crc kubenswrapper[4875]: I1126 00:20:36.379183 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/0255b072-0216-4a62-891e-dd3e301cf793-kube-api-access-slv96" (OuterVolumeSpecName: "kube-api-access-slv96") pod "0255b072-0216-4a62-891e-dd3e301cf793" (UID: "0255b072-0216-4a62-891e-dd3e301cf793"). InnerVolumeSpecName "kube-api-access-slv96". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:20:36 crc kubenswrapper[4875]: I1126 00:20:36.381726 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/0255b072-0216-4a62-891e-dd3e301cf793-util" (OuterVolumeSpecName: "util") pod "0255b072-0216-4a62-891e-dd3e301cf793" (UID: "0255b072-0216-4a62-891e-dd3e301cf793"). InnerVolumeSpecName "util". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:20:36 crc kubenswrapper[4875]: I1126 00:20:36.471403 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-slv96\" (UniqueName: \"kubernetes.io/projected/0255b072-0216-4a62-891e-dd3e301cf793-kube-api-access-slv96\") on node \"crc\" DevicePath \"\"" Nov 26 00:20:36 crc kubenswrapper[4875]: I1126 00:20:36.471452 4875 reconciler_common.go:293] "Volume detached for volume \"bundle\" (UniqueName: \"kubernetes.io/empty-dir/0255b072-0216-4a62-891e-dd3e301cf793-bundle\") on node \"crc\" DevicePath \"\"" Nov 26 00:20:36 crc kubenswrapper[4875]: I1126 00:20:36.471461 4875 reconciler_common.go:293] "Volume detached for volume \"util\" (UniqueName: \"kubernetes.io/empty-dir/0255b072-0216-4a62-891e-dd3e301cf793-util\") on node \"crc\" DevicePath \"\"" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.097784 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/elastic-operator-bf7b77755-xjqrk" event={"ID":"5fdc5235-fab2-4d96-8526-1ca2270a92ac","Type":"ContainerStarted","Data":"80a4540d847863a704d9f59865aec1dc26647043be8452f597439ce69a7378be"} Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.101530 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931amv77c" event={"ID":"0255b072-0216-4a62-891e-dd3e301cf793","Type":"ContainerDied","Data":"b475545b04796f959a67545aa8b94d7ec5226864e66017f4ddf62d7984cace0c"} Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.101569 4875 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="b475545b04796f959a67545aa8b94d7ec5226864e66017f4ddf62d7984cace0c" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.101611 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/1f59f640c8a0eb1a7b0f26c81382bbdde784d03eb439a940bb8da3931amv77c" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.118208 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="service-telemetry/elastic-operator-bf7b77755-xjqrk" podStartSLOduration=10.730712407 podStartE2EDuration="15.118187941s" podCreationTimestamp="2025-11-26 00:20:22 +0000 UTC" firstStartedPulling="2025-11-26 00:20:31.819585756 +0000 UTC m=+805.009561451" lastFinishedPulling="2025-11-26 00:20:36.2070613 +0000 UTC m=+809.397036985" observedRunningTime="2025-11-26 00:20:37.116144815 +0000 UTC m=+810.306120510" watchObservedRunningTime="2025-11-26 00:20:37.118187941 +0000 UTC m=+810.308163636" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.755362 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["service-telemetry/elasticsearch-es-default-0"] Nov 26 00:20:37 crc kubenswrapper[4875]: E1126 00:20:37.755583 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0cdb1c9a-f742-4e18-9e17-3bed2687ff54" containerName="extract-content" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.755595 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="0cdb1c9a-f742-4e18-9e17-3bed2687ff54" containerName="extract-content" Nov 26 00:20:37 crc kubenswrapper[4875]: E1126 00:20:37.755607 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0255b072-0216-4a62-891e-dd3e301cf793" containerName="util" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.755613 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="0255b072-0216-4a62-891e-dd3e301cf793" containerName="util" Nov 26 00:20:37 crc kubenswrapper[4875]: E1126 00:20:37.755620 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0255b072-0216-4a62-891e-dd3e301cf793" containerName="pull" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.755626 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="0255b072-0216-4a62-891e-dd3e301cf793" containerName="pull" Nov 26 00:20:37 crc kubenswrapper[4875]: E1126 00:20:37.755635 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0255b072-0216-4a62-891e-dd3e301cf793" containerName="extract" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.755642 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="0255b072-0216-4a62-891e-dd3e301cf793" containerName="extract" Nov 26 00:20:37 crc kubenswrapper[4875]: E1126 00:20:37.755654 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0cdb1c9a-f742-4e18-9e17-3bed2687ff54" containerName="registry-server" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.755659 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="0cdb1c9a-f742-4e18-9e17-3bed2687ff54" containerName="registry-server" Nov 26 00:20:37 crc kubenswrapper[4875]: E1126 00:20:37.755672 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="0cdb1c9a-f742-4e18-9e17-3bed2687ff54" containerName="extract-utilities" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.755678 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="0cdb1c9a-f742-4e18-9e17-3bed2687ff54" containerName="extract-utilities" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.755772 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="0255b072-0216-4a62-891e-dd3e301cf793" containerName="extract" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.755780 4875 memory_manager.go:354] "RemoveStaleState 
removing state" podUID="0cdb1c9a-f742-4e18-9e17-3bed2687ff54" containerName="registry-server" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.756485 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.758194 4875 reflector.go:368] Caches populated for *v1.Secret from object-"service-telemetry"/"elasticsearch-es-internal-users" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.758194 4875 reflector.go:368] Caches populated for *v1.Secret from object-"service-telemetry"/"elasticsearch-es-default-es-transport-certs" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.758701 4875 reflector.go:368] Caches populated for *v1.Secret from object-"service-telemetry"/"elasticsearch-es-remote-ca" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.759573 4875 reflector.go:368] Caches populated for *v1.Secret from object-"service-telemetry"/"default-dockercfg-j98p8" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.759874 4875 reflector.go:368] Caches populated for *v1.Secret from object-"service-telemetry"/"elasticsearch-es-xpack-file-realm" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.759889 4875 reflector.go:368] Caches populated for *v1.Secret from object-"service-telemetry"/"elasticsearch-es-default-es-config" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.759904 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"service-telemetry"/"elasticsearch-es-unicast-hosts" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.761281 4875 reflector.go:368] Caches populated for *v1.Secret from object-"service-telemetry"/"elasticsearch-es-http-certs-internal" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.761451 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"service-telemetry"/"elasticsearch-es-scripts" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.773769 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["service-telemetry/elasticsearch-es-default-0"] Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.886604 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"elastic-internal-scripts\" (UniqueName: \"kubernetes.io/configmap/a54f70d1-e9a4-40c7-9030-6d4ae433da24-elastic-internal-scripts\") pod \"elasticsearch-es-default-0\" (UID: \"a54f70d1-e9a4-40c7-9030-6d4ae433da24\") " pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.886872 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"elastic-internal-elasticsearch-plugins-local\" (UniqueName: \"kubernetes.io/empty-dir/a54f70d1-e9a4-40c7-9030-6d4ae433da24-elastic-internal-elasticsearch-plugins-local\") pod \"elasticsearch-es-default-0\" (UID: \"a54f70d1-e9a4-40c7-9030-6d4ae433da24\") " pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.886994 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp-volume\" (UniqueName: \"kubernetes.io/empty-dir/a54f70d1-e9a4-40c7-9030-6d4ae433da24-tmp-volume\") pod \"elasticsearch-es-default-0\" (UID: \"a54f70d1-e9a4-40c7-9030-6d4ae433da24\") " pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.887085 4875 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"elastic-internal-http-certificates\" (UniqueName: \"kubernetes.io/secret/a54f70d1-e9a4-40c7-9030-6d4ae433da24-elastic-internal-http-certificates\") pod \"elasticsearch-es-default-0\" (UID: \"a54f70d1-e9a4-40c7-9030-6d4ae433da24\") " pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.887194 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"elastic-internal-transport-certificates\" (UniqueName: \"kubernetes.io/secret/a54f70d1-e9a4-40c7-9030-6d4ae433da24-elastic-internal-transport-certificates\") pod \"elasticsearch-es-default-0\" (UID: \"a54f70d1-e9a4-40c7-9030-6d4ae433da24\") " pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.887268 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"elasticsearch-logs\" (UniqueName: \"kubernetes.io/empty-dir/a54f70d1-e9a4-40c7-9030-6d4ae433da24-elasticsearch-logs\") pod \"elasticsearch-es-default-0\" (UID: \"a54f70d1-e9a4-40c7-9030-6d4ae433da24\") " pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.887345 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"elasticsearch-data\" (UniqueName: \"kubernetes.io/empty-dir/a54f70d1-e9a4-40c7-9030-6d4ae433da24-elasticsearch-data\") pod \"elasticsearch-es-default-0\" (UID: \"a54f70d1-e9a4-40c7-9030-6d4ae433da24\") " pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.887426 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"downward-api\" (UniqueName: \"kubernetes.io/downward-api/a54f70d1-e9a4-40c7-9030-6d4ae433da24-downward-api\") pod \"elasticsearch-es-default-0\" (UID: \"a54f70d1-e9a4-40c7-9030-6d4ae433da24\") " pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.887633 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"elastic-internal-xpack-file-realm\" (UniqueName: \"kubernetes.io/secret/a54f70d1-e9a4-40c7-9030-6d4ae433da24-elastic-internal-xpack-file-realm\") pod \"elasticsearch-es-default-0\" (UID: \"a54f70d1-e9a4-40c7-9030-6d4ae433da24\") " pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.887752 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"elastic-internal-remote-certificate-authorities\" (UniqueName: \"kubernetes.io/secret/a54f70d1-e9a4-40c7-9030-6d4ae433da24-elastic-internal-remote-certificate-authorities\") pod \"elasticsearch-es-default-0\" (UID: \"a54f70d1-e9a4-40c7-9030-6d4ae433da24\") " pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.887864 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"elastic-internal-unicast-hosts\" (UniqueName: \"kubernetes.io/configmap/a54f70d1-e9a4-40c7-9030-6d4ae433da24-elastic-internal-unicast-hosts\") pod \"elasticsearch-es-default-0\" (UID: \"a54f70d1-e9a4-40c7-9030-6d4ae433da24\") " pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.887963 4875 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"elastic-internal-probe-user\" (UniqueName: \"kubernetes.io/secret/a54f70d1-e9a4-40c7-9030-6d4ae433da24-elastic-internal-probe-user\") pod \"elasticsearch-es-default-0\" (UID: \"a54f70d1-e9a4-40c7-9030-6d4ae433da24\") " pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.888071 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"elastic-internal-elasticsearch-config-local\" (UniqueName: \"kubernetes.io/empty-dir/a54f70d1-e9a4-40c7-9030-6d4ae433da24-elastic-internal-elasticsearch-config-local\") pod \"elasticsearch-es-default-0\" (UID: \"a54f70d1-e9a4-40c7-9030-6d4ae433da24\") " pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.888154 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"elastic-internal-elasticsearch-config\" (UniqueName: \"kubernetes.io/secret/a54f70d1-e9a4-40c7-9030-6d4ae433da24-elastic-internal-elasticsearch-config\") pod \"elasticsearch-es-default-0\" (UID: \"a54f70d1-e9a4-40c7-9030-6d4ae433da24\") " pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.888242 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"elastic-internal-elasticsearch-bin-local\" (UniqueName: \"kubernetes.io/empty-dir/a54f70d1-e9a4-40c7-9030-6d4ae433da24-elastic-internal-elasticsearch-bin-local\") pod \"elasticsearch-es-default-0\" (UID: \"a54f70d1-e9a4-40c7-9030-6d4ae433da24\") " pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.989484 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"elastic-internal-scripts\" (UniqueName: \"kubernetes.io/configmap/a54f70d1-e9a4-40c7-9030-6d4ae433da24-elastic-internal-scripts\") pod \"elasticsearch-es-default-0\" (UID: \"a54f70d1-e9a4-40c7-9030-6d4ae433da24\") " pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.989536 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"elastic-internal-elasticsearch-plugins-local\" (UniqueName: \"kubernetes.io/empty-dir/a54f70d1-e9a4-40c7-9030-6d4ae433da24-elastic-internal-elasticsearch-plugins-local\") pod \"elasticsearch-es-default-0\" (UID: \"a54f70d1-e9a4-40c7-9030-6d4ae433da24\") " pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.989568 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp-volume\" (UniqueName: \"kubernetes.io/empty-dir/a54f70d1-e9a4-40c7-9030-6d4ae433da24-tmp-volume\") pod \"elasticsearch-es-default-0\" (UID: \"a54f70d1-e9a4-40c7-9030-6d4ae433da24\") " pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.989588 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"elastic-internal-http-certificates\" (UniqueName: \"kubernetes.io/secret/a54f70d1-e9a4-40c7-9030-6d4ae433da24-elastic-internal-http-certificates\") pod \"elasticsearch-es-default-0\" (UID: \"a54f70d1-e9a4-40c7-9030-6d4ae433da24\") " pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.989609 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"elastic-internal-transport-certificates\" (UniqueName: \"kubernetes.io/secret/a54f70d1-e9a4-40c7-9030-6d4ae433da24-elastic-internal-transport-certificates\") pod \"elasticsearch-es-default-0\" (UID: \"a54f70d1-e9a4-40c7-9030-6d4ae433da24\") " pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.989631 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"elasticsearch-logs\" (UniqueName: \"kubernetes.io/empty-dir/a54f70d1-e9a4-40c7-9030-6d4ae433da24-elasticsearch-logs\") pod \"elasticsearch-es-default-0\" (UID: \"a54f70d1-e9a4-40c7-9030-6d4ae433da24\") " pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.989655 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"elasticsearch-data\" (UniqueName: \"kubernetes.io/empty-dir/a54f70d1-e9a4-40c7-9030-6d4ae433da24-elasticsearch-data\") pod \"elasticsearch-es-default-0\" (UID: \"a54f70d1-e9a4-40c7-9030-6d4ae433da24\") " pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.989674 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"downward-api\" (UniqueName: \"kubernetes.io/downward-api/a54f70d1-e9a4-40c7-9030-6d4ae433da24-downward-api\") pod \"elasticsearch-es-default-0\" (UID: \"a54f70d1-e9a4-40c7-9030-6d4ae433da24\") " pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.989690 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"elastic-internal-xpack-file-realm\" (UniqueName: \"kubernetes.io/secret/a54f70d1-e9a4-40c7-9030-6d4ae433da24-elastic-internal-xpack-file-realm\") pod \"elasticsearch-es-default-0\" (UID: \"a54f70d1-e9a4-40c7-9030-6d4ae433da24\") " pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.989714 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"elastic-internal-remote-certificate-authorities\" (UniqueName: \"kubernetes.io/secret/a54f70d1-e9a4-40c7-9030-6d4ae433da24-elastic-internal-remote-certificate-authorities\") pod \"elasticsearch-es-default-0\" (UID: \"a54f70d1-e9a4-40c7-9030-6d4ae433da24\") " pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.989734 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"elastic-internal-unicast-hosts\" (UniqueName: \"kubernetes.io/configmap/a54f70d1-e9a4-40c7-9030-6d4ae433da24-elastic-internal-unicast-hosts\") pod \"elasticsearch-es-default-0\" (UID: \"a54f70d1-e9a4-40c7-9030-6d4ae433da24\") " pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.990398 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"elastic-internal-probe-user\" (UniqueName: \"kubernetes.io/secret/a54f70d1-e9a4-40c7-9030-6d4ae433da24-elastic-internal-probe-user\") pod \"elasticsearch-es-default-0\" (UID: \"a54f70d1-e9a4-40c7-9030-6d4ae433da24\") " pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.990460 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"elastic-internal-elasticsearch-config-local\" (UniqueName: \"kubernetes.io/empty-dir/a54f70d1-e9a4-40c7-9030-6d4ae433da24-elastic-internal-elasticsearch-config-local\") pod 
\"elasticsearch-es-default-0\" (UID: \"a54f70d1-e9a4-40c7-9030-6d4ae433da24\") " pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.990484 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"elastic-internal-elasticsearch-config\" (UniqueName: \"kubernetes.io/secret/a54f70d1-e9a4-40c7-9030-6d4ae433da24-elastic-internal-elasticsearch-config\") pod \"elasticsearch-es-default-0\" (UID: \"a54f70d1-e9a4-40c7-9030-6d4ae433da24\") " pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.990507 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"elastic-internal-elasticsearch-bin-local\" (UniqueName: \"kubernetes.io/empty-dir/a54f70d1-e9a4-40c7-9030-6d4ae433da24-elastic-internal-elasticsearch-bin-local\") pod \"elasticsearch-es-default-0\" (UID: \"a54f70d1-e9a4-40c7-9030-6d4ae433da24\") " pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.990811 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"elasticsearch-logs\" (UniqueName: \"kubernetes.io/empty-dir/a54f70d1-e9a4-40c7-9030-6d4ae433da24-elasticsearch-logs\") pod \"elasticsearch-es-default-0\" (UID: \"a54f70d1-e9a4-40c7-9030-6d4ae433da24\") " pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.990844 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"elastic-internal-elasticsearch-bin-local\" (UniqueName: \"kubernetes.io/empty-dir/a54f70d1-e9a4-40c7-9030-6d4ae433da24-elastic-internal-elasticsearch-bin-local\") pod \"elasticsearch-es-default-0\" (UID: \"a54f70d1-e9a4-40c7-9030-6d4ae433da24\") " pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.991065 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"elastic-internal-elasticsearch-config-local\" (UniqueName: \"kubernetes.io/empty-dir/a54f70d1-e9a4-40c7-9030-6d4ae433da24-elastic-internal-elasticsearch-config-local\") pod \"elasticsearch-es-default-0\" (UID: \"a54f70d1-e9a4-40c7-9030-6d4ae433da24\") " pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.992392 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"elastic-internal-scripts\" (UniqueName: \"kubernetes.io/configmap/a54f70d1-e9a4-40c7-9030-6d4ae433da24-elastic-internal-scripts\") pod \"elasticsearch-es-default-0\" (UID: \"a54f70d1-e9a4-40c7-9030-6d4ae433da24\") " pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.992786 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"elastic-internal-elasticsearch-plugins-local\" (UniqueName: \"kubernetes.io/empty-dir/a54f70d1-e9a4-40c7-9030-6d4ae433da24-elastic-internal-elasticsearch-plugins-local\") pod \"elasticsearch-es-default-0\" (UID: \"a54f70d1-e9a4-40c7-9030-6d4ae433da24\") " pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.993181 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp-volume\" (UniqueName: \"kubernetes.io/empty-dir/a54f70d1-e9a4-40c7-9030-6d4ae433da24-tmp-volume\") pod \"elasticsearch-es-default-0\" (UID: \"a54f70d1-e9a4-40c7-9030-6d4ae433da24\") " pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:37 crc kubenswrapper[4875]: 
I1126 00:20:37.993835 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"elasticsearch-data\" (UniqueName: \"kubernetes.io/empty-dir/a54f70d1-e9a4-40c7-9030-6d4ae433da24-elasticsearch-data\") pod \"elasticsearch-es-default-0\" (UID: \"a54f70d1-e9a4-40c7-9030-6d4ae433da24\") " pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.994452 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"elastic-internal-unicast-hosts\" (UniqueName: \"kubernetes.io/configmap/a54f70d1-e9a4-40c7-9030-6d4ae433da24-elastic-internal-unicast-hosts\") pod \"elasticsearch-es-default-0\" (UID: \"a54f70d1-e9a4-40c7-9030-6d4ae433da24\") " pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.997813 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"elastic-internal-transport-certificates\" (UniqueName: \"kubernetes.io/secret/a54f70d1-e9a4-40c7-9030-6d4ae433da24-elastic-internal-transport-certificates\") pod \"elasticsearch-es-default-0\" (UID: \"a54f70d1-e9a4-40c7-9030-6d4ae433da24\") " pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.997986 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"elastic-internal-remote-certificate-authorities\" (UniqueName: \"kubernetes.io/secret/a54f70d1-e9a4-40c7-9030-6d4ae433da24-elastic-internal-remote-certificate-authorities\") pod \"elasticsearch-es-default-0\" (UID: \"a54f70d1-e9a4-40c7-9030-6d4ae433da24\") " pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.998740 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"elastic-internal-xpack-file-realm\" (UniqueName: \"kubernetes.io/secret/a54f70d1-e9a4-40c7-9030-6d4ae433da24-elastic-internal-xpack-file-realm\") pod \"elasticsearch-es-default-0\" (UID: \"a54f70d1-e9a4-40c7-9030-6d4ae433da24\") " pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.998764 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"elastic-internal-probe-user\" (UniqueName: \"kubernetes.io/secret/a54f70d1-e9a4-40c7-9030-6d4ae433da24-elastic-internal-probe-user\") pod \"elasticsearch-es-default-0\" (UID: \"a54f70d1-e9a4-40c7-9030-6d4ae433da24\") " pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.999788 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"elastic-internal-http-certificates\" (UniqueName: \"kubernetes.io/secret/a54f70d1-e9a4-40c7-9030-6d4ae433da24-elastic-internal-http-certificates\") pod \"elasticsearch-es-default-0\" (UID: \"a54f70d1-e9a4-40c7-9030-6d4ae433da24\") " pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:37 crc kubenswrapper[4875]: I1126 00:20:37.999877 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"downward-api\" (UniqueName: \"kubernetes.io/downward-api/a54f70d1-e9a4-40c7-9030-6d4ae433da24-downward-api\") pod \"elasticsearch-es-default-0\" (UID: \"a54f70d1-e9a4-40c7-9030-6d4ae433da24\") " pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:38 crc kubenswrapper[4875]: I1126 00:20:38.000867 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"elastic-internal-elasticsearch-config\" (UniqueName: 
\"kubernetes.io/secret/a54f70d1-e9a4-40c7-9030-6d4ae433da24-elastic-internal-elasticsearch-config\") pod \"elasticsearch-es-default-0\" (UID: \"a54f70d1-e9a4-40c7-9030-6d4ae433da24\") " pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:38 crc kubenswrapper[4875]: I1126 00:20:38.074718 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:20:41 crc kubenswrapper[4875]: I1126 00:20:41.719793 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["service-telemetry/elasticsearch-es-default-0"] Nov 26 00:20:41 crc kubenswrapper[4875]: W1126 00:20:41.726500 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda54f70d1_e9a4_40c7_9030_6d4ae433da24.slice/crio-3cffb654902a4c3ec591511ed4304dcf7723d492fb8fbfb9eff9d4a367e673e8 WatchSource:0}: Error finding container 3cffb654902a4c3ec591511ed4304dcf7723d492fb8fbfb9eff9d4a367e673e8: Status 404 returned error can't find the container with id 3cffb654902a4c3ec591511ed4304dcf7723d492fb8fbfb9eff9d4a367e673e8 Nov 26 00:20:42 crc kubenswrapper[4875]: I1126 00:20:42.139199 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/interconnect-operator-5bb49f789d-b22zm" event={"ID":"2ca17085-9ed1-4682-be93-56e571db4862","Type":"ContainerStarted","Data":"337392ec8655cd8eb1c0feeb5fec698a96b25ccc0af039ead8d92eb5373d555a"} Nov 26 00:20:42 crc kubenswrapper[4875]: I1126 00:20:42.140574 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/elasticsearch-es-default-0" event={"ID":"a54f70d1-e9a4-40c7-9030-6d4ae433da24","Type":"ContainerStarted","Data":"3cffb654902a4c3ec591511ed4304dcf7723d492fb8fbfb9eff9d4a367e673e8"} Nov 26 00:20:42 crc kubenswrapper[4875]: I1126 00:20:42.154569 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="service-telemetry/interconnect-operator-5bb49f789d-b22zm" podStartSLOduration=6.455733339 podStartE2EDuration="16.154550455s" podCreationTimestamp="2025-11-26 00:20:26 +0000 UTC" firstStartedPulling="2025-11-26 00:20:31.862099924 +0000 UTC m=+805.052075619" lastFinishedPulling="2025-11-26 00:20:41.56091704 +0000 UTC m=+814.750892735" observedRunningTime="2025-11-26 00:20:42.153104265 +0000 UTC m=+815.343079960" watchObservedRunningTime="2025-11-26 00:20:42.154550455 +0000 UTC m=+815.344526160" Nov 26 00:20:45 crc kubenswrapper[4875]: I1126 00:20:45.951392 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-operators/perses-operator-5446b9c989-b4svl" Nov 26 00:20:48 crc kubenswrapper[4875]: I1126 00:20:48.186901 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-r47ph" event={"ID":"7361ac7e-9f8b-43fd-a44b-a2090a837e24","Type":"ContainerStarted","Data":"1329d0f7ed4738861a9036dafdd24733a64ffff934b80f33a8a87d9fe8716b10"} Nov 26 00:20:48 crc kubenswrapper[4875]: I1126 00:20:48.207145 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-operators/obo-prometheus-operator-668cf9dfbb-r47ph" podStartSLOduration=1.439071371 podStartE2EDuration="33.207122951s" podCreationTimestamp="2025-11-26 00:20:15 +0000 UTC" firstStartedPulling="2025-11-26 00:20:15.817494696 +0000 UTC m=+789.007470391" lastFinishedPulling="2025-11-26 00:20:47.585546276 +0000 UTC m=+820.775521971" observedRunningTime="2025-11-26 00:20:48.203833082 +0000 UTC m=+821.393808777" 
watchObservedRunningTime="2025-11-26 00:20:48.207122951 +0000 UTC m=+821.397098646" Nov 26 00:20:54 crc kubenswrapper[4875]: I1126 00:20:54.237199 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/elasticsearch-es-default-0" event={"ID":"a54f70d1-e9a4-40c7-9030-6d4ae433da24","Type":"ContainerStarted","Data":"0f77858955883196c26326ba76d2b650a4a64625ed7b6d6d06be6d49f47d8123"} Nov 26 00:20:54 crc kubenswrapper[4875]: I1126 00:20:54.546044 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["service-telemetry/elasticsearch-es-default-0"] Nov 26 00:20:54 crc kubenswrapper[4875]: I1126 00:20:54.588325 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["service-telemetry/elasticsearch-es-default-0"] Nov 26 00:20:54 crc kubenswrapper[4875]: I1126 00:20:54.666199 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-6svrm"] Nov 26 00:20:54 crc kubenswrapper[4875]: I1126 00:20:54.667275 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6svrm" Nov 26 00:20:54 crc kubenswrapper[4875]: I1126 00:20:54.686035 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6svrm"] Nov 26 00:20:54 crc kubenswrapper[4875]: I1126 00:20:54.834531 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1ee50293-2567-45a5-a04f-3c2ae937875a-catalog-content\") pod \"community-operators-6svrm\" (UID: \"1ee50293-2567-45a5-a04f-3c2ae937875a\") " pod="openshift-marketplace/community-operators-6svrm" Nov 26 00:20:54 crc kubenswrapper[4875]: I1126 00:20:54.834941 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6m2xk\" (UniqueName: \"kubernetes.io/projected/1ee50293-2567-45a5-a04f-3c2ae937875a-kube-api-access-6m2xk\") pod \"community-operators-6svrm\" (UID: \"1ee50293-2567-45a5-a04f-3c2ae937875a\") " pod="openshift-marketplace/community-operators-6svrm" Nov 26 00:20:54 crc kubenswrapper[4875]: I1126 00:20:54.835057 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1ee50293-2567-45a5-a04f-3c2ae937875a-utilities\") pod \"community-operators-6svrm\" (UID: \"1ee50293-2567-45a5-a04f-3c2ae937875a\") " pod="openshift-marketplace/community-operators-6svrm" Nov 26 00:20:54 crc kubenswrapper[4875]: I1126 00:20:54.935626 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1ee50293-2567-45a5-a04f-3c2ae937875a-utilities\") pod \"community-operators-6svrm\" (UID: \"1ee50293-2567-45a5-a04f-3c2ae937875a\") " pod="openshift-marketplace/community-operators-6svrm" Nov 26 00:20:54 crc kubenswrapper[4875]: I1126 00:20:54.935675 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1ee50293-2567-45a5-a04f-3c2ae937875a-catalog-content\") pod \"community-operators-6svrm\" (UID: \"1ee50293-2567-45a5-a04f-3c2ae937875a\") " pod="openshift-marketplace/community-operators-6svrm" Nov 26 00:20:54 crc kubenswrapper[4875]: I1126 00:20:54.935746 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6m2xk\" (UniqueName: 
\"kubernetes.io/projected/1ee50293-2567-45a5-a04f-3c2ae937875a-kube-api-access-6m2xk\") pod \"community-operators-6svrm\" (UID: \"1ee50293-2567-45a5-a04f-3c2ae937875a\") " pod="openshift-marketplace/community-operators-6svrm" Nov 26 00:20:54 crc kubenswrapper[4875]: I1126 00:20:54.936203 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1ee50293-2567-45a5-a04f-3c2ae937875a-utilities\") pod \"community-operators-6svrm\" (UID: \"1ee50293-2567-45a5-a04f-3c2ae937875a\") " pod="openshift-marketplace/community-operators-6svrm" Nov 26 00:20:54 crc kubenswrapper[4875]: I1126 00:20:54.936237 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1ee50293-2567-45a5-a04f-3c2ae937875a-catalog-content\") pod \"community-operators-6svrm\" (UID: \"1ee50293-2567-45a5-a04f-3c2ae937875a\") " pod="openshift-marketplace/community-operators-6svrm" Nov 26 00:20:54 crc kubenswrapper[4875]: I1126 00:20:54.956901 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-6m2xk\" (UniqueName: \"kubernetes.io/projected/1ee50293-2567-45a5-a04f-3c2ae937875a-kube-api-access-6m2xk\") pod \"community-operators-6svrm\" (UID: \"1ee50293-2567-45a5-a04f-3c2ae937875a\") " pod="openshift-marketplace/community-operators-6svrm" Nov 26 00:20:54 crc kubenswrapper[4875]: I1126 00:20:54.983050 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6svrm" Nov 26 00:20:55 crc kubenswrapper[4875]: I1126 00:20:55.454335 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-6svrm"] Nov 26 00:20:56 crc kubenswrapper[4875]: I1126 00:20:56.257018 4875 generic.go:334] "Generic (PLEG): container finished" podID="1ee50293-2567-45a5-a04f-3c2ae937875a" containerID="ae1c24369b623c9f388452c317014bdec9c90b0939a8e3f4bbe5af33e7f0dfde" exitCode=0 Nov 26 00:20:56 crc kubenswrapper[4875]: I1126 00:20:56.257130 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6svrm" event={"ID":"1ee50293-2567-45a5-a04f-3c2ae937875a","Type":"ContainerDied","Data":"ae1c24369b623c9f388452c317014bdec9c90b0939a8e3f4bbe5af33e7f0dfde"} Nov 26 00:20:56 crc kubenswrapper[4875]: I1126 00:20:56.257600 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6svrm" event={"ID":"1ee50293-2567-45a5-a04f-3c2ae937875a","Type":"ContainerStarted","Data":"f16a2bc69c2f96e2c788a13912378538e730d8e956d24a8bfacb2f19cd6ded12"} Nov 26 00:20:56 crc kubenswrapper[4875]: I1126 00:20:56.259876 4875 generic.go:334] "Generic (PLEG): container finished" podID="a54f70d1-e9a4-40c7-9030-6d4ae433da24" containerID="0f77858955883196c26326ba76d2b650a4a64625ed7b6d6d06be6d49f47d8123" exitCode=0 Nov 26 00:20:56 crc kubenswrapper[4875]: I1126 00:20:56.259926 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/elasticsearch-es-default-0" event={"ID":"a54f70d1-e9a4-40c7-9030-6d4ae433da24","Type":"ContainerDied","Data":"0f77858955883196c26326ba76d2b650a4a64625ed7b6d6d06be6d49f47d8123"} Nov 26 00:20:56 crc kubenswrapper[4875]: E1126 00:20:56.522547 4875 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: 
[\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poda54f70d1_e9a4_40c7_9030_6d4ae433da24.slice/crio-conmon-01f3220aa0d6e4fd0a8311b9824fb9be813286fb3db051699cffec7ac8e5315f.scope\": RecentStats: unable to find data in memory cache]" Nov 26 00:20:57 crc kubenswrapper[4875]: I1126 00:20:57.266735 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6svrm" event={"ID":"1ee50293-2567-45a5-a04f-3c2ae937875a","Type":"ContainerStarted","Data":"33c0c820764874f6f8a877a28d69f5359a8db3ad4a0a3fac2c01563e1c27e27b"} Nov 26 00:20:57 crc kubenswrapper[4875]: I1126 00:20:57.268447 4875 generic.go:334] "Generic (PLEG): container finished" podID="a54f70d1-e9a4-40c7-9030-6d4ae433da24" containerID="01f3220aa0d6e4fd0a8311b9824fb9be813286fb3db051699cffec7ac8e5315f" exitCode=0 Nov 26 00:20:57 crc kubenswrapper[4875]: I1126 00:20:57.268475 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/elasticsearch-es-default-0" event={"ID":"a54f70d1-e9a4-40c7-9030-6d4ae433da24","Type":"ContainerDied","Data":"01f3220aa0d6e4fd0a8311b9824fb9be813286fb3db051699cffec7ac8e5315f"} Nov 26 00:20:57 crc kubenswrapper[4875]: I1126 00:20:57.844106 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-5446d6888b-fvfc5"] Nov 26 00:20:57 crc kubenswrapper[4875]: I1126 00:20:57.844753 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-5446d6888b-fvfc5" Nov 26 00:20:57 crc kubenswrapper[4875]: I1126 00:20:57.847297 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"openshift-service-ca.crt" Nov 26 00:20:57 crc kubenswrapper[4875]: I1126 00:20:57.847319 4875 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager-operator"/"cert-manager-operator-controller-manager-dockercfg-sdm89" Nov 26 00:20:57 crc kubenswrapper[4875]: I1126 00:20:57.847543 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"kube-root-ca.crt" Nov 26 00:20:57 crc kubenswrapper[4875]: I1126 00:20:57.859056 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-5446d6888b-fvfc5"] Nov 26 00:20:57 crc kubenswrapper[4875]: I1126 00:20:57.886510 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/529a0722-6aa3-46d7-b988-acb8f00cee78-tmp\") pod \"cert-manager-operator-controller-manager-5446d6888b-fvfc5\" (UID: \"529a0722-6aa3-46d7-b988-acb8f00cee78\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-5446d6888b-fvfc5" Nov 26 00:20:57 crc kubenswrapper[4875]: I1126 00:20:57.886736 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-nk8f5\" (UniqueName: \"kubernetes.io/projected/529a0722-6aa3-46d7-b988-acb8f00cee78-kube-api-access-nk8f5\") pod \"cert-manager-operator-controller-manager-5446d6888b-fvfc5\" (UID: \"529a0722-6aa3-46d7-b988-acb8f00cee78\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-5446d6888b-fvfc5" Nov 26 00:20:57 crc kubenswrapper[4875]: I1126 00:20:57.988344 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/529a0722-6aa3-46d7-b988-acb8f00cee78-tmp\") pod 
\"cert-manager-operator-controller-manager-5446d6888b-fvfc5\" (UID: \"529a0722-6aa3-46d7-b988-acb8f00cee78\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-5446d6888b-fvfc5" Nov 26 00:20:57 crc kubenswrapper[4875]: I1126 00:20:57.988468 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-nk8f5\" (UniqueName: \"kubernetes.io/projected/529a0722-6aa3-46d7-b988-acb8f00cee78-kube-api-access-nk8f5\") pod \"cert-manager-operator-controller-manager-5446d6888b-fvfc5\" (UID: \"529a0722-6aa3-46d7-b988-acb8f00cee78\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-5446d6888b-fvfc5" Nov 26 00:20:57 crc kubenswrapper[4875]: I1126 00:20:57.988934 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"tmp\" (UniqueName: \"kubernetes.io/empty-dir/529a0722-6aa3-46d7-b988-acb8f00cee78-tmp\") pod \"cert-manager-operator-controller-manager-5446d6888b-fvfc5\" (UID: \"529a0722-6aa3-46d7-b988-acb8f00cee78\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-5446d6888b-fvfc5" Nov 26 00:20:58 crc kubenswrapper[4875]: I1126 00:20:58.008510 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-nk8f5\" (UniqueName: \"kubernetes.io/projected/529a0722-6aa3-46d7-b988-acb8f00cee78-kube-api-access-nk8f5\") pod \"cert-manager-operator-controller-manager-5446d6888b-fvfc5\" (UID: \"529a0722-6aa3-46d7-b988-acb8f00cee78\") " pod="cert-manager-operator/cert-manager-operator-controller-manager-5446d6888b-fvfc5" Nov 26 00:20:58 crc kubenswrapper[4875]: I1126 00:20:58.157485 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager-operator/cert-manager-operator-controller-manager-5446d6888b-fvfc5" Nov 26 00:20:58 crc kubenswrapper[4875]: I1126 00:20:58.285967 4875 generic.go:334] "Generic (PLEG): container finished" podID="1ee50293-2567-45a5-a04f-3c2ae937875a" containerID="33c0c820764874f6f8a877a28d69f5359a8db3ad4a0a3fac2c01563e1c27e27b" exitCode=0 Nov 26 00:20:58 crc kubenswrapper[4875]: I1126 00:20:58.286024 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6svrm" event={"ID":"1ee50293-2567-45a5-a04f-3c2ae937875a","Type":"ContainerDied","Data":"33c0c820764874f6f8a877a28d69f5359a8db3ad4a0a3fac2c01563e1c27e27b"} Nov 26 00:20:58 crc kubenswrapper[4875]: I1126 00:20:58.584736 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager-operator/cert-manager-operator-controller-manager-5446d6888b-fvfc5"] Nov 26 00:20:59 crc kubenswrapper[4875]: I1126 00:20:59.292864 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-5446d6888b-fvfc5" event={"ID":"529a0722-6aa3-46d7-b988-acb8f00cee78","Type":"ContainerStarted","Data":"cce84ca7f85dcb5a3ee583bdbce0404bb87954cbf64fc6aa606ed7fc3f24a307"} Nov 26 00:21:04 crc kubenswrapper[4875]: I1126 00:21:04.322035 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/elasticsearch-es-default-0" event={"ID":"a54f70d1-e9a4-40c7-9030-6d4ae433da24","Type":"ContainerStarted","Data":"87511bfc33536bd811c4d8c166568a05091bfeb5152ba8578107f75b8d54db1e"} Nov 26 00:21:06 crc kubenswrapper[4875]: I1126 00:21:06.334169 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:21:07 crc kubenswrapper[4875]: I1126 00:21:07.561257 4875 pod_startup_latency_tracker.go:104] "Observed pod 
startup duration" pod="service-telemetry/elasticsearch-es-default-0" podStartSLOduration=18.65195317 podStartE2EDuration="30.561238329s" podCreationTimestamp="2025-11-26 00:20:37 +0000 UTC" firstStartedPulling="2025-11-26 00:20:41.728536685 +0000 UTC m=+814.918512380" lastFinishedPulling="2025-11-26 00:20:53.637821844 +0000 UTC m=+826.827797539" observedRunningTime="2025-11-26 00:21:06.361598232 +0000 UTC m=+839.551573947" watchObservedRunningTime="2025-11-26 00:21:07.561238329 +0000 UTC m=+840.751214034" Nov 26 00:21:09 crc kubenswrapper[4875]: I1126 00:21:09.348212 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager-operator/cert-manager-operator-controller-manager-5446d6888b-fvfc5" event={"ID":"529a0722-6aa3-46d7-b988-acb8f00cee78","Type":"ContainerStarted","Data":"d6dd78b3cac9e41bd83b1ffe3134e4f1776118646605fbe771d4a601bd5482c0"} Nov 26 00:21:09 crc kubenswrapper[4875]: I1126 00:21:09.349990 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6svrm" event={"ID":"1ee50293-2567-45a5-a04f-3c2ae937875a","Type":"ContainerStarted","Data":"83492c9aa999b2cc603cf377cf75279566018c761cac5d270f11bf658b455e48"} Nov 26 00:21:09 crc kubenswrapper[4875]: I1126 00:21:09.370078 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager-operator/cert-manager-operator-controller-manager-5446d6888b-fvfc5" podStartSLOduration=2.063943076 podStartE2EDuration="12.37006087s" podCreationTimestamp="2025-11-26 00:20:57 +0000 UTC" firstStartedPulling="2025-11-26 00:20:58.593711406 +0000 UTC m=+831.783687101" lastFinishedPulling="2025-11-26 00:21:08.89982919 +0000 UTC m=+842.089804895" observedRunningTime="2025-11-26 00:21:09.364147986 +0000 UTC m=+842.554123681" watchObservedRunningTime="2025-11-26 00:21:09.37006087 +0000 UTC m=+842.560036565" Nov 26 00:21:09 crc kubenswrapper[4875]: I1126 00:21:09.387043 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-6svrm" podStartSLOduration=2.745594126 podStartE2EDuration="15.387023031s" podCreationTimestamp="2025-11-26 00:20:54 +0000 UTC" firstStartedPulling="2025-11-26 00:20:56.258569949 +0000 UTC m=+829.448545634" lastFinishedPulling="2025-11-26 00:21:08.899998804 +0000 UTC m=+842.089974539" observedRunningTime="2025-11-26 00:21:09.384787478 +0000 UTC m=+842.574763193" watchObservedRunningTime="2025-11-26 00:21:09.387023031 +0000 UTC m=+842.576998726" Nov 26 00:21:13 crc kubenswrapper[4875]: I1126 00:21:13.142040 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-2bx8m"] Nov 26 00:21:13 crc kubenswrapper[4875]: I1126 00:21:13.143318 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-f4fb5df64-2bx8m" Nov 26 00:21:13 crc kubenswrapper[4875]: I1126 00:21:13.145383 4875 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-knx9h" Nov 26 00:21:13 crc kubenswrapper[4875]: I1126 00:21:13.145396 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Nov 26 00:21:13 crc kubenswrapper[4875]: I1126 00:21:13.145596 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Nov 26 00:21:13 crc kubenswrapper[4875]: I1126 00:21:13.154316 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-2bx8m"] Nov 26 00:21:13 crc kubenswrapper[4875]: I1126 00:21:13.197243 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/7b94883c-6fc2-4f66-a969-121d26394910-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-2bx8m\" (UID: \"7b94883c-6fc2-4f66-a969-121d26394910\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-2bx8m" Nov 26 00:21:13 crc kubenswrapper[4875]: I1126 00:21:13.197312 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-969j5\" (UniqueName: \"kubernetes.io/projected/7b94883c-6fc2-4f66-a969-121d26394910-kube-api-access-969j5\") pod \"cert-manager-webhook-f4fb5df64-2bx8m\" (UID: \"7b94883c-6fc2-4f66-a969-121d26394910\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-2bx8m" Nov 26 00:21:13 crc kubenswrapper[4875]: I1126 00:21:13.298806 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/7b94883c-6fc2-4f66-a969-121d26394910-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-2bx8m\" (UID: \"7b94883c-6fc2-4f66-a969-121d26394910\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-2bx8m" Nov 26 00:21:13 crc kubenswrapper[4875]: I1126 00:21:13.299107 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-969j5\" (UniqueName: \"kubernetes.io/projected/7b94883c-6fc2-4f66-a969-121d26394910-kube-api-access-969j5\") pod \"cert-manager-webhook-f4fb5df64-2bx8m\" (UID: \"7b94883c-6fc2-4f66-a969-121d26394910\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-2bx8m" Nov 26 00:21:13 crc kubenswrapper[4875]: I1126 00:21:13.318617 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/7b94883c-6fc2-4f66-a969-121d26394910-bound-sa-token\") pod \"cert-manager-webhook-f4fb5df64-2bx8m\" (UID: \"7b94883c-6fc2-4f66-a969-121d26394910\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-2bx8m" Nov 26 00:21:13 crc kubenswrapper[4875]: I1126 00:21:13.326064 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-969j5\" (UniqueName: \"kubernetes.io/projected/7b94883c-6fc2-4f66-a969-121d26394910-kube-api-access-969j5\") pod \"cert-manager-webhook-f4fb5df64-2bx8m\" (UID: \"7b94883c-6fc2-4f66-a969-121d26394910\") " pod="cert-manager/cert-manager-webhook-f4fb5df64-2bx8m" Nov 26 00:21:13 crc kubenswrapper[4875]: I1126 00:21:13.456871 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-webhook-f4fb5df64-2bx8m" Nov 26 00:21:13 crc kubenswrapper[4875]: I1126 00:21:13.947008 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-webhook-f4fb5df64-2bx8m"] Nov 26 00:21:14 crc kubenswrapper[4875]: I1126 00:21:14.376177 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-f4fb5df64-2bx8m" event={"ID":"7b94883c-6fc2-4f66-a969-121d26394910","Type":"ContainerStarted","Data":"61d4c92a1cb907a17c5b99242a1d923411d07c4081b4820d868cfd16d6816353"} Nov 26 00:21:14 crc kubenswrapper[4875]: I1126 00:21:14.477522 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-tcxqx"] Nov 26 00:21:14 crc kubenswrapper[4875]: I1126 00:21:14.478446 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-855d9ccff4-tcxqx" Nov 26 00:21:14 crc kubenswrapper[4875]: I1126 00:21:14.481344 4875 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-sk7kb" Nov 26 00:21:14 crc kubenswrapper[4875]: I1126 00:21:14.489149 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-tcxqx"] Nov 26 00:21:14 crc kubenswrapper[4875]: I1126 00:21:14.516073 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/2bf438ed-b485-429e-ab20-c5f354a3ab25-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-tcxqx\" (UID: \"2bf438ed-b485-429e-ab20-c5f354a3ab25\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-tcxqx" Nov 26 00:21:14 crc kubenswrapper[4875]: I1126 00:21:14.516144 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-sqvkk\" (UniqueName: \"kubernetes.io/projected/2bf438ed-b485-429e-ab20-c5f354a3ab25-kube-api-access-sqvkk\") pod \"cert-manager-cainjector-855d9ccff4-tcxqx\" (UID: \"2bf438ed-b485-429e-ab20-c5f354a3ab25\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-tcxqx" Nov 26 00:21:14 crc kubenswrapper[4875]: I1126 00:21:14.617987 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/2bf438ed-b485-429e-ab20-c5f354a3ab25-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-tcxqx\" (UID: \"2bf438ed-b485-429e-ab20-c5f354a3ab25\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-tcxqx" Nov 26 00:21:14 crc kubenswrapper[4875]: I1126 00:21:14.618058 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-sqvkk\" (UniqueName: \"kubernetes.io/projected/2bf438ed-b485-429e-ab20-c5f354a3ab25-kube-api-access-sqvkk\") pod \"cert-manager-cainjector-855d9ccff4-tcxqx\" (UID: \"2bf438ed-b485-429e-ab20-c5f354a3ab25\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-tcxqx" Nov 26 00:21:14 crc kubenswrapper[4875]: I1126 00:21:14.637045 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/2bf438ed-b485-429e-ab20-c5f354a3ab25-bound-sa-token\") pod \"cert-manager-cainjector-855d9ccff4-tcxqx\" (UID: \"2bf438ed-b485-429e-ab20-c5f354a3ab25\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-tcxqx" Nov 26 00:21:14 crc kubenswrapper[4875]: I1126 00:21:14.637182 4875 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-sqvkk\" (UniqueName: \"kubernetes.io/projected/2bf438ed-b485-429e-ab20-c5f354a3ab25-kube-api-access-sqvkk\") pod \"cert-manager-cainjector-855d9ccff4-tcxqx\" (UID: \"2bf438ed-b485-429e-ab20-c5f354a3ab25\") " pod="cert-manager/cert-manager-cainjector-855d9ccff4-tcxqx" Nov 26 00:21:14 crc kubenswrapper[4875]: I1126 00:21:14.797080 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="cert-manager/cert-manager-cainjector-855d9ccff4-tcxqx" Nov 26 00:21:14 crc kubenswrapper[4875]: I1126 00:21:14.990441 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-6svrm" Nov 26 00:21:14 crc kubenswrapper[4875]: I1126 00:21:14.991553 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-6svrm" Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.087045 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-cainjector-855d9ccff4-tcxqx"] Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.113028 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-6svrm" Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.285807 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["service-telemetry/service-telemetry-operator-1-build"] Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.286716 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="service-telemetry/service-telemetry-operator-1-build" Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.292938 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"service-telemetry"/"service-telemetry-operator-1-global-ca" Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.292971 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"service-telemetry"/"service-telemetry-operator-1-ca" Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.293764 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"service-telemetry"/"service-telemetry-operator-1-sys-config" Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.302854 4875 reflector.go:368] Caches populated for *v1.Secret from object-"service-telemetry"/"builder-dockercfg-5d5px" Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.321594 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["service-telemetry/service-telemetry-operator-1-build"] Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.329123 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/e8c8a0d0-5043-4941-acd7-e9e233b0542e-build-system-configs\") pod \"service-telemetry-operator-1-build\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " pod="service-telemetry/service-telemetry-operator-1-build" Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.329164 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e8c8a0d0-5043-4941-acd7-e9e233b0542e-build-ca-bundles\") pod \"service-telemetry-operator-1-build\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " pod="service-telemetry/service-telemetry-operator-1-build" Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.329194 4875 
reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-cf5dw\" (UniqueName: \"kubernetes.io/projected/e8c8a0d0-5043-4941-acd7-e9e233b0542e-kube-api-access-cf5dw\") pod \"service-telemetry-operator-1-build\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " pod="service-telemetry/service-telemetry-operator-1-build" Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.329220 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e8c8a0d0-5043-4941-acd7-e9e233b0542e-build-proxy-ca-bundles\") pod \"service-telemetry-operator-1-build\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " pod="service-telemetry/service-telemetry-operator-1-build" Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.329247 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/e8c8a0d0-5043-4941-acd7-e9e233b0542e-buildworkdir\") pod \"service-telemetry-operator-1-build\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " pod="service-telemetry/service-telemetry-operator-1-build" Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.329267 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/e8c8a0d0-5043-4941-acd7-e9e233b0542e-container-storage-root\") pod \"service-telemetry-operator-1-build\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " pod="service-telemetry/service-telemetry-operator-1-build" Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.329283 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/e8c8a0d0-5043-4941-acd7-e9e233b0542e-buildcachedir\") pod \"service-telemetry-operator-1-build\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " pod="service-telemetry/service-telemetry-operator-1-build" Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.329303 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/e8c8a0d0-5043-4941-acd7-e9e233b0542e-build-blob-cache\") pod \"service-telemetry-operator-1-build\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " pod="service-telemetry/service-telemetry-operator-1-build" Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.329318 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/e8c8a0d0-5043-4941-acd7-e9e233b0542e-container-storage-run\") pod \"service-telemetry-operator-1-build\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " pod="service-telemetry/service-telemetry-operator-1-build" Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.329340 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/e8c8a0d0-5043-4941-acd7-e9e233b0542e-builder-dockercfg-5d5px-push\") pod \"service-telemetry-operator-1-build\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " pod="service-telemetry/service-telemetry-operator-1-build" Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.329356 4875 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/e8c8a0d0-5043-4941-acd7-e9e233b0542e-builder-dockercfg-5d5px-pull\") pod \"service-telemetry-operator-1-build\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " pod="service-telemetry/service-telemetry-operator-1-build" Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.329380 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/e8c8a0d0-5043-4941-acd7-e9e233b0542e-node-pullsecrets\") pod \"service-telemetry-operator-1-build\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " pod="service-telemetry/service-telemetry-operator-1-build" Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.384309 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-855d9ccff4-tcxqx" event={"ID":"2bf438ed-b485-429e-ab20-c5f354a3ab25","Type":"ContainerStarted","Data":"29673d815a71f0020301cdd1b3da8db9611f4d7dd1e5e0ac7e5fc8f525ef3f64"} Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.430184 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/e8c8a0d0-5043-4941-acd7-e9e233b0542e-buildcachedir\") pod \"service-telemetry-operator-1-build\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " pod="service-telemetry/service-telemetry-operator-1-build" Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.430238 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/e8c8a0d0-5043-4941-acd7-e9e233b0542e-build-blob-cache\") pod \"service-telemetry-operator-1-build\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " pod="service-telemetry/service-telemetry-operator-1-build" Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.430261 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/e8c8a0d0-5043-4941-acd7-e9e233b0542e-container-storage-run\") pod \"service-telemetry-operator-1-build\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " pod="service-telemetry/service-telemetry-operator-1-build" Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.430290 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/e8c8a0d0-5043-4941-acd7-e9e233b0542e-builder-dockercfg-5d5px-push\") pod \"service-telemetry-operator-1-build\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " pod="service-telemetry/service-telemetry-operator-1-build" Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.430304 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/e8c8a0d0-5043-4941-acd7-e9e233b0542e-builder-dockercfg-5d5px-pull\") pod \"service-telemetry-operator-1-build\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " pod="service-telemetry/service-telemetry-operator-1-build" Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.430335 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/e8c8a0d0-5043-4941-acd7-e9e233b0542e-node-pullsecrets\") pod \"service-telemetry-operator-1-build\" (UID: 
\"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " pod="service-telemetry/service-telemetry-operator-1-build" Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.430387 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/e8c8a0d0-5043-4941-acd7-e9e233b0542e-build-system-configs\") pod \"service-telemetry-operator-1-build\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " pod="service-telemetry/service-telemetry-operator-1-build" Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.430426 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e8c8a0d0-5043-4941-acd7-e9e233b0542e-build-ca-bundles\") pod \"service-telemetry-operator-1-build\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " pod="service-telemetry/service-telemetry-operator-1-build" Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.430514 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-cf5dw\" (UniqueName: \"kubernetes.io/projected/e8c8a0d0-5043-4941-acd7-e9e233b0542e-kube-api-access-cf5dw\") pod \"service-telemetry-operator-1-build\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " pod="service-telemetry/service-telemetry-operator-1-build" Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.430554 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e8c8a0d0-5043-4941-acd7-e9e233b0542e-build-proxy-ca-bundles\") pod \"service-telemetry-operator-1-build\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " pod="service-telemetry/service-telemetry-operator-1-build" Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.430580 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/e8c8a0d0-5043-4941-acd7-e9e233b0542e-buildworkdir\") pod \"service-telemetry-operator-1-build\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " pod="service-telemetry/service-telemetry-operator-1-build" Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.430600 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/e8c8a0d0-5043-4941-acd7-e9e233b0542e-container-storage-root\") pod \"service-telemetry-operator-1-build\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " pod="service-telemetry/service-telemetry-operator-1-build" Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.431808 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/e8c8a0d0-5043-4941-acd7-e9e233b0542e-container-storage-root\") pod \"service-telemetry-operator-1-build\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " pod="service-telemetry/service-telemetry-operator-1-build" Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.431902 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/e8c8a0d0-5043-4941-acd7-e9e233b0542e-buildcachedir\") pod \"service-telemetry-operator-1-build\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " pod="service-telemetry/service-telemetry-operator-1-build" Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.432073 4875 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/e8c8a0d0-5043-4941-acd7-e9e233b0542e-build-blob-cache\") pod \"service-telemetry-operator-1-build\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " pod="service-telemetry/service-telemetry-operator-1-build" Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.432252 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/e8c8a0d0-5043-4941-acd7-e9e233b0542e-container-storage-run\") pod \"service-telemetry-operator-1-build\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " pod="service-telemetry/service-telemetry-operator-1-build" Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.432711 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/e8c8a0d0-5043-4941-acd7-e9e233b0542e-node-pullsecrets\") pod \"service-telemetry-operator-1-build\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " pod="service-telemetry/service-telemetry-operator-1-build" Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.433324 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e8c8a0d0-5043-4941-acd7-e9e233b0542e-build-ca-bundles\") pod \"service-telemetry-operator-1-build\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " pod="service-telemetry/service-telemetry-operator-1-build" Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.433665 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-6svrm" Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.433694 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/e8c8a0d0-5043-4941-acd7-e9e233b0542e-build-system-configs\") pod \"service-telemetry-operator-1-build\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " pod="service-telemetry/service-telemetry-operator-1-build" Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.433778 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/e8c8a0d0-5043-4941-acd7-e9e233b0542e-buildworkdir\") pod \"service-telemetry-operator-1-build\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " pod="service-telemetry/service-telemetry-operator-1-build" Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.434108 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e8c8a0d0-5043-4941-acd7-e9e233b0542e-build-proxy-ca-bundles\") pod \"service-telemetry-operator-1-build\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " pod="service-telemetry/service-telemetry-operator-1-build" Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.442655 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/e8c8a0d0-5043-4941-acd7-e9e233b0542e-builder-dockercfg-5d5px-push\") pod \"service-telemetry-operator-1-build\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " pod="service-telemetry/service-telemetry-operator-1-build" Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.443248 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: 
\"kubernetes.io/secret/e8c8a0d0-5043-4941-acd7-e9e233b0542e-builder-dockercfg-5d5px-pull\") pod \"service-telemetry-operator-1-build\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " pod="service-telemetry/service-telemetry-operator-1-build" Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.450887 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-cf5dw\" (UniqueName: \"kubernetes.io/projected/e8c8a0d0-5043-4941-acd7-e9e233b0542e-kube-api-access-cf5dw\") pod \"service-telemetry-operator-1-build\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " pod="service-telemetry/service-telemetry-operator-1-build" Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.492784 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6svrm"] Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.606056 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="service-telemetry/service-telemetry-operator-1-build" Nov 26 00:21:15 crc kubenswrapper[4875]: I1126 00:21:15.826695 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["service-telemetry/service-telemetry-operator-1-build"] Nov 26 00:21:15 crc kubenswrapper[4875]: W1126 00:21:15.847479 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pode8c8a0d0_5043_4941_acd7_e9e233b0542e.slice/crio-026d009193b6c20f2e4a38d7070451b3f2e28aeb6aa61a622fa2c6589c129859 WatchSource:0}: Error finding container 026d009193b6c20f2e4a38d7070451b3f2e28aeb6aa61a622fa2c6589c129859: Status 404 returned error can't find the container with id 026d009193b6c20f2e4a38d7070451b3f2e28aeb6aa61a622fa2c6589c129859 Nov 26 00:21:16 crc kubenswrapper[4875]: I1126 00:21:16.393477 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/service-telemetry-operator-1-build" event={"ID":"e8c8a0d0-5043-4941-acd7-e9e233b0542e","Type":"ContainerStarted","Data":"026d009193b6c20f2e4a38d7070451b3f2e28aeb6aa61a622fa2c6589c129859"} Nov 26 00:21:17 crc kubenswrapper[4875]: I1126 00:21:17.400570 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-6svrm" podUID="1ee50293-2567-45a5-a04f-3c2ae937875a" containerName="registry-server" containerID="cri-o://83492c9aa999b2cc603cf377cf75279566018c761cac5d270f11bf658b455e48" gracePeriod=2 Nov 26 00:21:18 crc kubenswrapper[4875]: I1126 00:21:18.147307 4875 prober.go:107] "Probe failed" probeType="Readiness" pod="service-telemetry/elasticsearch-es-default-0" podUID="a54f70d1-e9a4-40c7-9030-6d4ae433da24" containerName="elasticsearch" probeResult="failure" output=< Nov 26 00:21:18 crc kubenswrapper[4875]: {"timestamp": "2025-11-26T00:21:18+00:00", "message": "readiness probe failed", "curl_rc": "7"} Nov 26 00:21:18 crc kubenswrapper[4875]: > Nov 26 00:21:18 crc kubenswrapper[4875]: I1126 00:21:18.413399 4875 generic.go:334] "Generic (PLEG): container finished" podID="1ee50293-2567-45a5-a04f-3c2ae937875a" containerID="83492c9aa999b2cc603cf377cf75279566018c761cac5d270f11bf658b455e48" exitCode=0 Nov 26 00:21:18 crc kubenswrapper[4875]: I1126 00:21:18.413485 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6svrm" event={"ID":"1ee50293-2567-45a5-a04f-3c2ae937875a","Type":"ContainerDied","Data":"83492c9aa999b2cc603cf377cf75279566018c761cac5d270f11bf658b455e48"} Nov 26 00:21:21 crc kubenswrapper[4875]: I1126 00:21:21.543269 4875 
util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6svrm" Nov 26 00:21:21 crc kubenswrapper[4875]: I1126 00:21:21.642019 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6m2xk\" (UniqueName: \"kubernetes.io/projected/1ee50293-2567-45a5-a04f-3c2ae937875a-kube-api-access-6m2xk\") pod \"1ee50293-2567-45a5-a04f-3c2ae937875a\" (UID: \"1ee50293-2567-45a5-a04f-3c2ae937875a\") " Nov 26 00:21:21 crc kubenswrapper[4875]: I1126 00:21:21.642311 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1ee50293-2567-45a5-a04f-3c2ae937875a-catalog-content\") pod \"1ee50293-2567-45a5-a04f-3c2ae937875a\" (UID: \"1ee50293-2567-45a5-a04f-3c2ae937875a\") " Nov 26 00:21:21 crc kubenswrapper[4875]: I1126 00:21:21.642506 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1ee50293-2567-45a5-a04f-3c2ae937875a-utilities\") pod \"1ee50293-2567-45a5-a04f-3c2ae937875a\" (UID: \"1ee50293-2567-45a5-a04f-3c2ae937875a\") " Nov 26 00:21:21 crc kubenswrapper[4875]: I1126 00:21:21.643275 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1ee50293-2567-45a5-a04f-3c2ae937875a-utilities" (OuterVolumeSpecName: "utilities") pod "1ee50293-2567-45a5-a04f-3c2ae937875a" (UID: "1ee50293-2567-45a5-a04f-3c2ae937875a"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:21:21 crc kubenswrapper[4875]: I1126 00:21:21.648116 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/1ee50293-2567-45a5-a04f-3c2ae937875a-kube-api-access-6m2xk" (OuterVolumeSpecName: "kube-api-access-6m2xk") pod "1ee50293-2567-45a5-a04f-3c2ae937875a" (UID: "1ee50293-2567-45a5-a04f-3c2ae937875a"). InnerVolumeSpecName "kube-api-access-6m2xk". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:21:21 crc kubenswrapper[4875]: I1126 00:21:21.711585 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/1ee50293-2567-45a5-a04f-3c2ae937875a-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "1ee50293-2567-45a5-a04f-3c2ae937875a" (UID: "1ee50293-2567-45a5-a04f-3c2ae937875a"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:21:21 crc kubenswrapper[4875]: I1126 00:21:21.743644 4875 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/1ee50293-2567-45a5-a04f-3c2ae937875a-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 00:21:21 crc kubenswrapper[4875]: I1126 00:21:21.743681 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6m2xk\" (UniqueName: \"kubernetes.io/projected/1ee50293-2567-45a5-a04f-3c2ae937875a-kube-api-access-6m2xk\") on node \"crc\" DevicePath \"\"" Nov 26 00:21:21 crc kubenswrapper[4875]: I1126 00:21:21.743693 4875 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/1ee50293-2567-45a5-a04f-3c2ae937875a-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 00:21:22 crc kubenswrapper[4875]: I1126 00:21:22.450226 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-6svrm" event={"ID":"1ee50293-2567-45a5-a04f-3c2ae937875a","Type":"ContainerDied","Data":"f16a2bc69c2f96e2c788a13912378538e730d8e956d24a8bfacb2f19cd6ded12"} Nov 26 00:21:22 crc kubenswrapper[4875]: I1126 00:21:22.450280 4875 scope.go:117] "RemoveContainer" containerID="83492c9aa999b2cc603cf377cf75279566018c761cac5d270f11bf658b455e48" Nov 26 00:21:22 crc kubenswrapper[4875]: I1126 00:21:22.450399 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-6svrm" Nov 26 00:21:22 crc kubenswrapper[4875]: I1126 00:21:22.478589 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-6svrm"] Nov 26 00:21:22 crc kubenswrapper[4875]: I1126 00:21:22.486931 4875 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-6svrm"] Nov 26 00:21:23 crc kubenswrapper[4875]: I1126 00:21:23.553009 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="1ee50293-2567-45a5-a04f-3c2ae937875a" path="/var/lib/kubelet/pods/1ee50293-2567-45a5-a04f-3c2ae937875a/volumes" Nov 26 00:21:23 crc kubenswrapper[4875]: I1126 00:21:23.563377 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="service-telemetry/elasticsearch-es-default-0" Nov 26 00:21:25 crc kubenswrapper[4875]: I1126 00:21:25.551091 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["service-telemetry/service-telemetry-operator-1-build"] Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.141132 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["service-telemetry/service-telemetry-operator-2-build"] Nov 26 00:21:27 crc kubenswrapper[4875]: E1126 00:21:27.141953 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ee50293-2567-45a5-a04f-3c2ae937875a" containerName="registry-server" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.141969 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ee50293-2567-45a5-a04f-3c2ae937875a" containerName="registry-server" Nov 26 00:21:27 crc kubenswrapper[4875]: E1126 00:21:27.141989 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ee50293-2567-45a5-a04f-3c2ae937875a" containerName="extract-utilities" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.141996 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ee50293-2567-45a5-a04f-3c2ae937875a" containerName="extract-utilities" Nov 26 00:21:27 crc kubenswrapper[4875]: E1126 
00:21:27.142014 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="1ee50293-2567-45a5-a04f-3c2ae937875a" containerName="extract-content" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.142022 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="1ee50293-2567-45a5-a04f-3c2ae937875a" containerName="extract-content" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.142147 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="1ee50293-2567-45a5-a04f-3c2ae937875a" containerName="registry-server" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.143088 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="service-telemetry/service-telemetry-operator-2-build" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.145799 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"service-telemetry"/"service-telemetry-operator-2-ca" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.145964 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"service-telemetry"/"service-telemetry-operator-2-sys-config" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.149030 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"service-telemetry"/"service-telemetry-operator-2-global-ca" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.155867 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["service-telemetry/service-telemetry-operator-2-build"] Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.216665 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-buildworkdir\") pod \"service-telemetry-operator-2-build\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " pod="service-telemetry/service-telemetry-operator-2-build" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.216716 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-buildcachedir\") pod \"service-telemetry-operator-2-build\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " pod="service-telemetry/service-telemetry-operator-2-build" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.216738 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-build-system-configs\") pod \"service-telemetry-operator-2-build\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " pod="service-telemetry/service-telemetry-operator-2-build" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.216778 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-build-blob-cache\") pod \"service-telemetry-operator-2-build\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " pod="service-telemetry/service-telemetry-operator-2-build" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.216885 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-wjwcn\" (UniqueName: \"kubernetes.io/projected/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-kube-api-access-wjwcn\") pod 
\"service-telemetry-operator-2-build\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " pod="service-telemetry/service-telemetry-operator-2-build" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.216928 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-builder-dockercfg-5d5px-push\") pod \"service-telemetry-operator-2-build\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " pod="service-telemetry/service-telemetry-operator-2-build" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.216947 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-builder-dockercfg-5d5px-pull\") pod \"service-telemetry-operator-2-build\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " pod="service-telemetry/service-telemetry-operator-2-build" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.216966 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-build-ca-bundles\") pod \"service-telemetry-operator-2-build\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " pod="service-telemetry/service-telemetry-operator-2-build" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.217011 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-node-pullsecrets\") pod \"service-telemetry-operator-2-build\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " pod="service-telemetry/service-telemetry-operator-2-build" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.217040 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-build-proxy-ca-bundles\") pod \"service-telemetry-operator-2-build\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " pod="service-telemetry/service-telemetry-operator-2-build" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.217087 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-container-storage-root\") pod \"service-telemetry-operator-2-build\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " pod="service-telemetry/service-telemetry-operator-2-build" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.217155 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-container-storage-run\") pod \"service-telemetry-operator-2-build\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " pod="service-telemetry/service-telemetry-operator-2-build" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.318966 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-wjwcn\" (UniqueName: \"kubernetes.io/projected/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-kube-api-access-wjwcn\") pod 
\"service-telemetry-operator-2-build\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " pod="service-telemetry/service-telemetry-operator-2-build" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.319023 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-builder-dockercfg-5d5px-push\") pod \"service-telemetry-operator-2-build\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " pod="service-telemetry/service-telemetry-operator-2-build" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.319049 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-builder-dockercfg-5d5px-pull\") pod \"service-telemetry-operator-2-build\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " pod="service-telemetry/service-telemetry-operator-2-build" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.319071 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-build-ca-bundles\") pod \"service-telemetry-operator-2-build\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " pod="service-telemetry/service-telemetry-operator-2-build" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.319100 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-node-pullsecrets\") pod \"service-telemetry-operator-2-build\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " pod="service-telemetry/service-telemetry-operator-2-build" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.319127 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-build-proxy-ca-bundles\") pod \"service-telemetry-operator-2-build\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " pod="service-telemetry/service-telemetry-operator-2-build" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.319164 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-container-storage-root\") pod \"service-telemetry-operator-2-build\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " pod="service-telemetry/service-telemetry-operator-2-build" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.319201 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-container-storage-run\") pod \"service-telemetry-operator-2-build\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " pod="service-telemetry/service-telemetry-operator-2-build" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.319252 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-buildworkdir\") pod \"service-telemetry-operator-2-build\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " pod="service-telemetry/service-telemetry-operator-2-build" Nov 26 00:21:27 crc 
kubenswrapper[4875]: I1126 00:21:27.319274 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-node-pullsecrets\") pod \"service-telemetry-operator-2-build\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " pod="service-telemetry/service-telemetry-operator-2-build" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.319297 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-buildcachedir\") pod \"service-telemetry-operator-2-build\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " pod="service-telemetry/service-telemetry-operator-2-build" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.319328 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-buildcachedir\") pod \"service-telemetry-operator-2-build\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " pod="service-telemetry/service-telemetry-operator-2-build" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.319360 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-build-system-configs\") pod \"service-telemetry-operator-2-build\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " pod="service-telemetry/service-telemetry-operator-2-build" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.319434 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-build-blob-cache\") pod \"service-telemetry-operator-2-build\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " pod="service-telemetry/service-telemetry-operator-2-build" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.319796 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-container-storage-root\") pod \"service-telemetry-operator-2-build\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " pod="service-telemetry/service-telemetry-operator-2-build" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.319942 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-container-storage-run\") pod \"service-telemetry-operator-2-build\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " pod="service-telemetry/service-telemetry-operator-2-build" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.319970 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-build-blob-cache\") pod \"service-telemetry-operator-2-build\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " pod="service-telemetry/service-telemetry-operator-2-build" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.320071 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-buildworkdir\") pod \"service-telemetry-operator-2-build\" 
(UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " pod="service-telemetry/service-telemetry-operator-2-build" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.320304 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-build-proxy-ca-bundles\") pod \"service-telemetry-operator-2-build\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " pod="service-telemetry/service-telemetry-operator-2-build" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.320603 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-build-system-configs\") pod \"service-telemetry-operator-2-build\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " pod="service-telemetry/service-telemetry-operator-2-build" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.321020 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-build-ca-bundles\") pod \"service-telemetry-operator-2-build\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " pod="service-telemetry/service-telemetry-operator-2-build" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.324385 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-builder-dockercfg-5d5px-pull\") pod \"service-telemetry-operator-2-build\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " pod="service-telemetry/service-telemetry-operator-2-build" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.328784 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-builder-dockercfg-5d5px-push\") pod \"service-telemetry-operator-2-build\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " pod="service-telemetry/service-telemetry-operator-2-build" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.336076 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-wjwcn\" (UniqueName: \"kubernetes.io/projected/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-kube-api-access-wjwcn\") pod \"service-telemetry-operator-2-build\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " pod="service-telemetry/service-telemetry-operator-2-build" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.446771 4875 scope.go:117] "RemoveContainer" containerID="33c0c820764874f6f8a877a28d69f5359a8db3ad4a0a3fac2c01563e1c27e27b" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.459778 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="service-telemetry/service-telemetry-operator-2-build" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.492361 4875 scope.go:117] "RemoveContainer" containerID="ae1c24369b623c9f388452c317014bdec9c90b0939a8e3f4bbe5af33e7f0dfde" Nov 26 00:21:27 crc kubenswrapper[4875]: I1126 00:21:27.960946 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["service-telemetry/service-telemetry-operator-2-build"] Nov 26 00:21:27 crc kubenswrapper[4875]: W1126 00:21:27.964814 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod9c08b0e5_f81f_4f46_a1da_40a7dd548a6a.slice/crio-a10558d247611a8dc9e82a62403989b2c8f2eea39f787f7329ec279336e6ae92 WatchSource:0}: Error finding container a10558d247611a8dc9e82a62403989b2c8f2eea39f787f7329ec279336e6ae92: Status 404 returned error can't find the container with id a10558d247611a8dc9e82a62403989b2c8f2eea39f787f7329ec279336e6ae92 Nov 26 00:21:28 crc kubenswrapper[4875]: I1126 00:21:28.484765 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-855d9ccff4-tcxqx" event={"ID":"2bf438ed-b485-429e-ab20-c5f354a3ab25","Type":"ContainerStarted","Data":"69ad54490d14386699363e8f1df531287523df983db3348f579efef865b0d906"} Nov 26 00:21:28 crc kubenswrapper[4875]: I1126 00:21:28.486393 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/service-telemetry-operator-1-build" event={"ID":"e8c8a0d0-5043-4941-acd7-e9e233b0542e","Type":"ContainerStarted","Data":"2fe2a23e8b052bc5abc29bd29c07e27aae6eb02b1cd99dfd4e47c77071547399"} Nov 26 00:21:28 crc kubenswrapper[4875]: I1126 00:21:28.486556 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="service-telemetry/service-telemetry-operator-1-build" podUID="e8c8a0d0-5043-4941-acd7-e9e233b0542e" containerName="manage-dockerfile" containerID="cri-o://2fe2a23e8b052bc5abc29bd29c07e27aae6eb02b1cd99dfd4e47c77071547399" gracePeriod=30 Nov 26 00:21:28 crc kubenswrapper[4875]: I1126 00:21:28.488202 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/service-telemetry-operator-2-build" event={"ID":"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a","Type":"ContainerStarted","Data":"746a835f7c7bb931ad1d317104581beab2ef3297ee8f39ded48370cebd05bae1"} Nov 26 00:21:28 crc kubenswrapper[4875]: I1126 00:21:28.488231 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/service-telemetry-operator-2-build" event={"ID":"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a","Type":"ContainerStarted","Data":"a10558d247611a8dc9e82a62403989b2c8f2eea39f787f7329ec279336e6ae92"} Nov 26 00:21:28 crc kubenswrapper[4875]: I1126 00:21:28.490889 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-webhook-f4fb5df64-2bx8m" event={"ID":"7b94883c-6fc2-4f66-a969-121d26394910","Type":"ContainerStarted","Data":"67f251cca766c6ac1ed3eb9ad9f53791fe63fcc972b214b8eb8c2d05fac68609"} Nov 26 00:21:28 crc kubenswrapper[4875]: I1126 00:21:28.491033 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="cert-manager/cert-manager-webhook-f4fb5df64-2bx8m" Nov 26 00:21:28 crc kubenswrapper[4875]: I1126 00:21:28.503983 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-cainjector-855d9ccff4-tcxqx" podStartSLOduration=2.117623048 podStartE2EDuration="14.503963767s" podCreationTimestamp="2025-11-26 00:21:14 +0000 UTC" firstStartedPulling="2025-11-26 
00:21:15.115170273 +0000 UTC m=+848.305145968" lastFinishedPulling="2025-11-26 00:21:27.501510992 +0000 UTC m=+860.691486687" observedRunningTime="2025-11-26 00:21:28.499078871 +0000 UTC m=+861.689054566" watchObservedRunningTime="2025-11-26 00:21:28.503963767 +0000 UTC m=+861.693939462" Nov 26 00:21:28 crc kubenswrapper[4875]: I1126 00:21:28.538797 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-webhook-f4fb5df64-2bx8m" podStartSLOduration=1.972020748 podStartE2EDuration="15.538781994s" podCreationTimestamp="2025-11-26 00:21:13 +0000 UTC" firstStartedPulling="2025-11-26 00:21:13.953401844 +0000 UTC m=+847.143377529" lastFinishedPulling="2025-11-26 00:21:27.52016308 +0000 UTC m=+860.710138775" observedRunningTime="2025-11-26 00:21:28.518863591 +0000 UTC m=+861.708839286" watchObservedRunningTime="2025-11-26 00:21:28.538781994 +0000 UTC m=+861.728757689" Nov 26 00:21:28 crc kubenswrapper[4875]: I1126 00:21:28.926380 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/service-telemetry_service-telemetry-operator-1-build_e8c8a0d0-5043-4941-acd7-e9e233b0542e/manage-dockerfile/0.log" Nov 26 00:21:28 crc kubenswrapper[4875]: I1126 00:21:28.926481 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="service-telemetry/service-telemetry-operator-1-build" Nov 26 00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.039795 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/e8c8a0d0-5043-4941-acd7-e9e233b0542e-container-storage-root\") pod \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " Nov 26 00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.039896 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/e8c8a0d0-5043-4941-acd7-e9e233b0542e-container-storage-run\") pod \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " Nov 26 00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.039959 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/e8c8a0d0-5043-4941-acd7-e9e233b0542e-buildworkdir\") pod \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " Nov 26 00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.040004 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/e8c8a0d0-5043-4941-acd7-e9e233b0542e-build-blob-cache\") pod \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " Nov 26 00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.040055 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e8c8a0d0-5043-4941-acd7-e9e233b0542e-build-ca-bundles\") pod \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " Nov 26 00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.040087 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e8c8a0d0-5043-4941-acd7-e9e233b0542e-build-proxy-ca-bundles\") pod \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " Nov 26 
00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.040145 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/e8c8a0d0-5043-4941-acd7-e9e233b0542e-builder-dockercfg-5d5px-push\") pod \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " Nov 26 00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.040217 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/e8c8a0d0-5043-4941-acd7-e9e233b0542e-builder-dockercfg-5d5px-pull\") pod \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " Nov 26 00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.040229 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e8c8a0d0-5043-4941-acd7-e9e233b0542e-container-storage-root" (OuterVolumeSpecName: "container-storage-root") pod "e8c8a0d0-5043-4941-acd7-e9e233b0542e" (UID: "e8c8a0d0-5043-4941-acd7-e9e233b0542e"). InnerVolumeSpecName "container-storage-root". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.040259 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/e8c8a0d0-5043-4941-acd7-e9e233b0542e-buildcachedir\") pod \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " Nov 26 00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.040335 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e8c8a0d0-5043-4941-acd7-e9e233b0542e-buildcachedir" (OuterVolumeSpecName: "buildcachedir") pod "e8c8a0d0-5043-4941-acd7-e9e233b0542e" (UID: "e8c8a0d0-5043-4941-acd7-e9e233b0542e"). InnerVolumeSpecName "buildcachedir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.040388 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-cf5dw\" (UniqueName: \"kubernetes.io/projected/e8c8a0d0-5043-4941-acd7-e9e233b0542e-kube-api-access-cf5dw\") pod \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " Nov 26 00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.040445 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/e8c8a0d0-5043-4941-acd7-e9e233b0542e-build-system-configs\") pod \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " Nov 26 00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.040488 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/e8c8a0d0-5043-4941-acd7-e9e233b0542e-node-pullsecrets\") pod \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\" (UID: \"e8c8a0d0-5043-4941-acd7-e9e233b0542e\") " Nov 26 00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.040730 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e8c8a0d0-5043-4941-acd7-e9e233b0542e-container-storage-run" (OuterVolumeSpecName: "container-storage-run") pod "e8c8a0d0-5043-4941-acd7-e9e233b0542e" (UID: "e8c8a0d0-5043-4941-acd7-e9e233b0542e"). InnerVolumeSpecName "container-storage-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.041196 4875 reconciler_common.go:293] "Volume detached for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/e8c8a0d0-5043-4941-acd7-e9e233b0542e-container-storage-root\") on node \"crc\" DevicePath \"\"" Nov 26 00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.041211 4875 reconciler_common.go:293] "Volume detached for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/e8c8a0d0-5043-4941-acd7-e9e233b0542e-container-storage-run\") on node \"crc\" DevicePath \"\"" Nov 26 00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.041203 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e8c8a0d0-5043-4941-acd7-e9e233b0542e-buildworkdir" (OuterVolumeSpecName: "buildworkdir") pod "e8c8a0d0-5043-4941-acd7-e9e233b0542e" (UID: "e8c8a0d0-5043-4941-acd7-e9e233b0542e"). InnerVolumeSpecName "buildworkdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.041221 4875 reconciler_common.go:293] "Volume detached for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/e8c8a0d0-5043-4941-acd7-e9e233b0542e-buildcachedir\") on node \"crc\" DevicePath \"\"" Nov 26 00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.041494 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/e8c8a0d0-5043-4941-acd7-e9e233b0542e-build-blob-cache" (OuterVolumeSpecName: "build-blob-cache") pod "e8c8a0d0-5043-4941-acd7-e9e233b0542e" (UID: "e8c8a0d0-5043-4941-acd7-e9e233b0542e"). InnerVolumeSpecName "build-blob-cache". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.041660 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/e8c8a0d0-5043-4941-acd7-e9e233b0542e-node-pullsecrets" (OuterVolumeSpecName: "node-pullsecrets") pod "e8c8a0d0-5043-4941-acd7-e9e233b0542e" (UID: "e8c8a0d0-5043-4941-acd7-e9e233b0542e"). InnerVolumeSpecName "node-pullsecrets". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.041715 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e8c8a0d0-5043-4941-acd7-e9e233b0542e-build-system-configs" (OuterVolumeSpecName: "build-system-configs") pod "e8c8a0d0-5043-4941-acd7-e9e233b0542e" (UID: "e8c8a0d0-5043-4941-acd7-e9e233b0542e"). InnerVolumeSpecName "build-system-configs". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.041772 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e8c8a0d0-5043-4941-acd7-e9e233b0542e-build-proxy-ca-bundles" (OuterVolumeSpecName: "build-proxy-ca-bundles") pod "e8c8a0d0-5043-4941-acd7-e9e233b0542e" (UID: "e8c8a0d0-5043-4941-acd7-e9e233b0542e"). InnerVolumeSpecName "build-proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.042089 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/e8c8a0d0-5043-4941-acd7-e9e233b0542e-build-ca-bundles" (OuterVolumeSpecName: "build-ca-bundles") pod "e8c8a0d0-5043-4941-acd7-e9e233b0542e" (UID: "e8c8a0d0-5043-4941-acd7-e9e233b0542e"). InnerVolumeSpecName "build-ca-bundles". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.050635 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8c8a0d0-5043-4941-acd7-e9e233b0542e-builder-dockercfg-5d5px-pull" (OuterVolumeSpecName: "builder-dockercfg-5d5px-pull") pod "e8c8a0d0-5043-4941-acd7-e9e233b0542e" (UID: "e8c8a0d0-5043-4941-acd7-e9e233b0542e"). InnerVolumeSpecName "builder-dockercfg-5d5px-pull". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.051640 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/e8c8a0d0-5043-4941-acd7-e9e233b0542e-builder-dockercfg-5d5px-push" (OuterVolumeSpecName: "builder-dockercfg-5d5px-push") pod "e8c8a0d0-5043-4941-acd7-e9e233b0542e" (UID: "e8c8a0d0-5043-4941-acd7-e9e233b0542e"). InnerVolumeSpecName "builder-dockercfg-5d5px-push". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.053071 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/e8c8a0d0-5043-4941-acd7-e9e233b0542e-kube-api-access-cf5dw" (OuterVolumeSpecName: "kube-api-access-cf5dw") pod "e8c8a0d0-5043-4941-acd7-e9e233b0542e" (UID: "e8c8a0d0-5043-4941-acd7-e9e233b0542e"). InnerVolumeSpecName "kube-api-access-cf5dw". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.142600 4875 reconciler_common.go:293] "Volume detached for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/e8c8a0d0-5043-4941-acd7-e9e233b0542e-buildworkdir\") on node \"crc\" DevicePath \"\"" Nov 26 00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.142636 4875 reconciler_common.go:293] "Volume detached for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/e8c8a0d0-5043-4941-acd7-e9e233b0542e-build-blob-cache\") on node \"crc\" DevicePath \"\"" Nov 26 00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.142646 4875 reconciler_common.go:293] "Volume detached for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e8c8a0d0-5043-4941-acd7-e9e233b0542e-build-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 26 00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.142656 4875 reconciler_common.go:293] "Volume detached for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/e8c8a0d0-5043-4941-acd7-e9e233b0542e-build-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 26 00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.142667 4875 reconciler_common.go:293] "Volume detached for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/e8c8a0d0-5043-4941-acd7-e9e233b0542e-builder-dockercfg-5d5px-push\") on node \"crc\" DevicePath \"\"" Nov 26 00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.142676 4875 reconciler_common.go:293] "Volume detached for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/e8c8a0d0-5043-4941-acd7-e9e233b0542e-builder-dockercfg-5d5px-pull\") on node \"crc\" DevicePath \"\"" Nov 26 00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.142685 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-cf5dw\" (UniqueName: \"kubernetes.io/projected/e8c8a0d0-5043-4941-acd7-e9e233b0542e-kube-api-access-cf5dw\") on node \"crc\" DevicePath \"\"" Nov 26 00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.142693 4875 reconciler_common.go:293] "Volume detached for 
volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/e8c8a0d0-5043-4941-acd7-e9e233b0542e-build-system-configs\") on node \"crc\" DevicePath \"\"" Nov 26 00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.142701 4875 reconciler_common.go:293] "Volume detached for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/e8c8a0d0-5043-4941-acd7-e9e233b0542e-node-pullsecrets\") on node \"crc\" DevicePath \"\"" Nov 26 00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.498650 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/service-telemetry_service-telemetry-operator-1-build_e8c8a0d0-5043-4941-acd7-e9e233b0542e/manage-dockerfile/0.log" Nov 26 00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.499019 4875 generic.go:334] "Generic (PLEG): container finished" podID="e8c8a0d0-5043-4941-acd7-e9e233b0542e" containerID="2fe2a23e8b052bc5abc29bd29c07e27aae6eb02b1cd99dfd4e47c77071547399" exitCode=1 Nov 26 00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.499525 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/service-telemetry-operator-1-build" event={"ID":"e8c8a0d0-5043-4941-acd7-e9e233b0542e","Type":"ContainerDied","Data":"2fe2a23e8b052bc5abc29bd29c07e27aae6eb02b1cd99dfd4e47c77071547399"} Nov 26 00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.499594 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/service-telemetry-operator-1-build" event={"ID":"e8c8a0d0-5043-4941-acd7-e9e233b0542e","Type":"ContainerDied","Data":"026d009193b6c20f2e4a38d7070451b3f2e28aeb6aa61a622fa2c6589c129859"} Nov 26 00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.499615 4875 scope.go:117] "RemoveContainer" containerID="2fe2a23e8b052bc5abc29bd29c07e27aae6eb02b1cd99dfd4e47c77071547399" Nov 26 00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.499881 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="service-telemetry/service-telemetry-operator-1-build" Nov 26 00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.518287 4875 scope.go:117] "RemoveContainer" containerID="2fe2a23e8b052bc5abc29bd29c07e27aae6eb02b1cd99dfd4e47c77071547399" Nov 26 00:21:29 crc kubenswrapper[4875]: E1126 00:21:29.518677 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2fe2a23e8b052bc5abc29bd29c07e27aae6eb02b1cd99dfd4e47c77071547399\": container with ID starting with 2fe2a23e8b052bc5abc29bd29c07e27aae6eb02b1cd99dfd4e47c77071547399 not found: ID does not exist" containerID="2fe2a23e8b052bc5abc29bd29c07e27aae6eb02b1cd99dfd4e47c77071547399" Nov 26 00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.518707 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2fe2a23e8b052bc5abc29bd29c07e27aae6eb02b1cd99dfd4e47c77071547399"} err="failed to get container status \"2fe2a23e8b052bc5abc29bd29c07e27aae6eb02b1cd99dfd4e47c77071547399\": rpc error: code = NotFound desc = could not find container \"2fe2a23e8b052bc5abc29bd29c07e27aae6eb02b1cd99dfd4e47c77071547399\": container with ID starting with 2fe2a23e8b052bc5abc29bd29c07e27aae6eb02b1cd99dfd4e47c77071547399 not found: ID does not exist" Nov 26 00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.536600 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["service-telemetry/service-telemetry-operator-1-build"] Nov 26 00:21:29 crc kubenswrapper[4875]: I1126 00:21:29.536750 4875 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["service-telemetry/service-telemetry-operator-1-build"] Nov 26 00:21:31 crc kubenswrapper[4875]: I1126 00:21:31.536427 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="e8c8a0d0-5043-4941-acd7-e9e233b0542e" path="/var/lib/kubelet/pods/e8c8a0d0-5043-4941-acd7-e9e233b0542e/volumes" Nov 26 00:21:31 crc kubenswrapper[4875]: I1126 00:21:31.709434 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["cert-manager/cert-manager-86cb77c54b-6mr68"] Nov 26 00:21:31 crc kubenswrapper[4875]: E1126 00:21:31.709659 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="e8c8a0d0-5043-4941-acd7-e9e233b0542e" containerName="manage-dockerfile" Nov 26 00:21:31 crc kubenswrapper[4875]: I1126 00:21:31.709670 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="e8c8a0d0-5043-4941-acd7-e9e233b0542e" containerName="manage-dockerfile" Nov 26 00:21:31 crc kubenswrapper[4875]: I1126 00:21:31.709767 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="e8c8a0d0-5043-4941-acd7-e9e233b0542e" containerName="manage-dockerfile" Nov 26 00:21:31 crc kubenswrapper[4875]: I1126 00:21:31.710173 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-86cb77c54b-6mr68" Nov 26 00:21:31 crc kubenswrapper[4875]: I1126 00:21:31.712813 4875 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-x9c9w" Nov 26 00:21:31 crc kubenswrapper[4875]: I1126 00:21:31.720394 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-86cb77c54b-6mr68"] Nov 26 00:21:31 crc kubenswrapper[4875]: I1126 00:21:31.778292 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/20d9eaa7-e48a-4f9e-aa76-f7fe8494e270-bound-sa-token\") pod \"cert-manager-86cb77c54b-6mr68\" (UID: \"20d9eaa7-e48a-4f9e-aa76-f7fe8494e270\") " pod="cert-manager/cert-manager-86cb77c54b-6mr68" Nov 26 00:21:31 crc kubenswrapper[4875]: I1126 00:21:31.778362 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-p5fdp\" (UniqueName: \"kubernetes.io/projected/20d9eaa7-e48a-4f9e-aa76-f7fe8494e270-kube-api-access-p5fdp\") pod \"cert-manager-86cb77c54b-6mr68\" (UID: \"20d9eaa7-e48a-4f9e-aa76-f7fe8494e270\") " pod="cert-manager/cert-manager-86cb77c54b-6mr68" Nov 26 00:21:31 crc kubenswrapper[4875]: I1126 00:21:31.879542 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-p5fdp\" (UniqueName: \"kubernetes.io/projected/20d9eaa7-e48a-4f9e-aa76-f7fe8494e270-kube-api-access-p5fdp\") pod \"cert-manager-86cb77c54b-6mr68\" (UID: \"20d9eaa7-e48a-4f9e-aa76-f7fe8494e270\") " pod="cert-manager/cert-manager-86cb77c54b-6mr68" Nov 26 00:21:31 crc kubenswrapper[4875]: I1126 00:21:31.879685 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/20d9eaa7-e48a-4f9e-aa76-f7fe8494e270-bound-sa-token\") pod \"cert-manager-86cb77c54b-6mr68\" (UID: \"20d9eaa7-e48a-4f9e-aa76-f7fe8494e270\") " pod="cert-manager/cert-manager-86cb77c54b-6mr68" Nov 26 00:21:31 crc kubenswrapper[4875]: I1126 00:21:31.898905 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"bound-sa-token\" (UniqueName: \"kubernetes.io/projected/20d9eaa7-e48a-4f9e-aa76-f7fe8494e270-bound-sa-token\") pod \"cert-manager-86cb77c54b-6mr68\" (UID: \"20d9eaa7-e48a-4f9e-aa76-f7fe8494e270\") " pod="cert-manager/cert-manager-86cb77c54b-6mr68" Nov 26 00:21:31 crc kubenswrapper[4875]: I1126 00:21:31.898938 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-p5fdp\" (UniqueName: \"kubernetes.io/projected/20d9eaa7-e48a-4f9e-aa76-f7fe8494e270-kube-api-access-p5fdp\") pod \"cert-manager-86cb77c54b-6mr68\" (UID: \"20d9eaa7-e48a-4f9e-aa76-f7fe8494e270\") " pod="cert-manager/cert-manager-86cb77c54b-6mr68" Nov 26 00:21:32 crc kubenswrapper[4875]: I1126 00:21:32.025218 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="cert-manager/cert-manager-86cb77c54b-6mr68" Nov 26 00:21:32 crc kubenswrapper[4875]: I1126 00:21:32.456442 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["cert-manager/cert-manager-86cb77c54b-6mr68"] Nov 26 00:21:32 crc kubenswrapper[4875]: W1126 00:21:32.463925 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-besteffort.slice/kubepods-besteffort-pod20d9eaa7_e48a_4f9e_aa76_f7fe8494e270.slice/crio-d17e99a8141038babafae417c9621ff05699137fd65e2cdcee19c05f5c5b4949 WatchSource:0}: Error finding container d17e99a8141038babafae417c9621ff05699137fd65e2cdcee19c05f5c5b4949: Status 404 returned error can't find the container with id d17e99a8141038babafae417c9621ff05699137fd65e2cdcee19c05f5c5b4949 Nov 26 00:21:32 crc kubenswrapper[4875]: I1126 00:21:32.516128 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-86cb77c54b-6mr68" event={"ID":"20d9eaa7-e48a-4f9e-aa76-f7fe8494e270","Type":"ContainerStarted","Data":"d17e99a8141038babafae417c9621ff05699137fd65e2cdcee19c05f5c5b4949"} Nov 26 00:21:33 crc kubenswrapper[4875]: I1126 00:21:33.459308 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="cert-manager/cert-manager-webhook-f4fb5df64-2bx8m" Nov 26 00:21:33 crc kubenswrapper[4875]: I1126 00:21:33.523297 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-86cb77c54b-6mr68" event={"ID":"20d9eaa7-e48a-4f9e-aa76-f7fe8494e270","Type":"ContainerStarted","Data":"d27ec70223854359541ce1e9264aa45738e5894a0e130a1de8d3353cfbfd9120"} Nov 26 00:21:33 crc kubenswrapper[4875]: I1126 00:21:33.547930 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="cert-manager/cert-manager-86cb77c54b-6mr68" podStartSLOduration=2.547909662 podStartE2EDuration="2.547909662s" podCreationTimestamp="2025-11-26 00:21:31 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:21:33.536926487 +0000 UTC m=+866.726902182" watchObservedRunningTime="2025-11-26 00:21:33.547909662 +0000 UTC m=+866.737885357" Nov 26 00:21:36 crc kubenswrapper[4875]: I1126 00:21:36.543180 4875 generic.go:334] "Generic (PLEG): container finished" podID="9c08b0e5-f81f-4f46-a1da-40a7dd548a6a" containerID="746a835f7c7bb931ad1d317104581beab2ef3297ee8f39ded48370cebd05bae1" exitCode=0 Nov 26 00:21:36 crc kubenswrapper[4875]: I1126 00:21:36.543293 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/service-telemetry-operator-2-build" event={"ID":"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a","Type":"ContainerDied","Data":"746a835f7c7bb931ad1d317104581beab2ef3297ee8f39ded48370cebd05bae1"} Nov 26 00:21:37 crc kubenswrapper[4875]: I1126 00:21:37.552650 4875 generic.go:334] "Generic (PLEG): container finished" podID="9c08b0e5-f81f-4f46-a1da-40a7dd548a6a" containerID="fd61f35393c647bf660ae7c0f264560aa0f1eb055bb2e31a763e1456c61dcfa3" exitCode=0 Nov 26 00:21:37 crc kubenswrapper[4875]: I1126 00:21:37.552751 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/service-telemetry-operator-2-build" event={"ID":"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a","Type":"ContainerDied","Data":"fd61f35393c647bf660ae7c0f264560aa0f1eb055bb2e31a763e1456c61dcfa3"} Nov 26 00:21:37 crc kubenswrapper[4875]: I1126 00:21:37.585809 4875 log.go:25] "Finished parsing log file" 
path="/var/log/pods/service-telemetry_service-telemetry-operator-2-build_9c08b0e5-f81f-4f46-a1da-40a7dd548a6a/manage-dockerfile/0.log" Nov 26 00:21:38 crc kubenswrapper[4875]: I1126 00:21:38.561579 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/service-telemetry-operator-2-build" event={"ID":"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a","Type":"ContainerStarted","Data":"37ad6ae018d6137adf337a8506d92d1b9510c76dcb47367e4a97888642c477af"} Nov 26 00:21:38 crc kubenswrapper[4875]: I1126 00:21:38.599868 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="service-telemetry/service-telemetry-operator-2-build" podStartSLOduration=11.599852738 podStartE2EDuration="11.599852738s" podCreationTimestamp="2025-11-26 00:21:27 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:21:38.594699885 +0000 UTC m=+871.784675580" watchObservedRunningTime="2025-11-26 00:21:38.599852738 +0000 UTC m=+871.789828433" Nov 26 00:22:07 crc kubenswrapper[4875]: I1126 00:22:07.512006 4875 patch_prober.go:28] interesting pod/machine-config-daemon-9mztb container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 00:22:07 crc kubenswrapper[4875]: I1126 00:22:07.512952 4875 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 00:22:37 crc kubenswrapper[4875]: I1126 00:22:37.511834 4875 patch_prober.go:28] interesting pod/machine-config-daemon-9mztb container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 00:22:37 crc kubenswrapper[4875]: I1126 00:22:37.512352 4875 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 00:23:07 crc kubenswrapper[4875]: I1126 00:23:07.511649 4875 patch_prober.go:28] interesting pod/machine-config-daemon-9mztb container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 00:23:07 crc kubenswrapper[4875]: I1126 00:23:07.512101 4875 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 00:23:07 crc kubenswrapper[4875]: I1126 00:23:07.512148 4875 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" Nov 26 00:23:07 crc kubenswrapper[4875]: I1126 
00:23:07.513033 4875 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"46c47342b308d949e370e86db45ad1b54d496a9856e0ad49658508d0aa116ac9"} pod="openshift-machine-config-operator/machine-config-daemon-9mztb" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 00:23:07 crc kubenswrapper[4875]: I1126 00:23:07.513098 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" containerName="machine-config-daemon" containerID="cri-o://46c47342b308d949e370e86db45ad1b54d496a9856e0ad49658508d0aa116ac9" gracePeriod=600 Nov 26 00:23:07 crc kubenswrapper[4875]: I1126 00:23:07.738456 4875 generic.go:334] "Generic (PLEG): container finished" podID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" containerID="46c47342b308d949e370e86db45ad1b54d496a9856e0ad49658508d0aa116ac9" exitCode=0 Nov 26 00:23:07 crc kubenswrapper[4875]: I1126 00:23:07.738518 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" event={"ID":"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e","Type":"ContainerDied","Data":"46c47342b308d949e370e86db45ad1b54d496a9856e0ad49658508d0aa116ac9"} Nov 26 00:23:07 crc kubenswrapper[4875]: I1126 00:23:07.738746 4875 scope.go:117] "RemoveContainer" containerID="4a7640d4f785bf9d0f293ee193979132e8a131e63ea85ce86ad12662c6293db6" Nov 26 00:23:08 crc kubenswrapper[4875]: E1126 00:23:08.282176 4875 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d4fb5a7_3ade_467a_9640_d744b1ef0c8e.slice/crio-conmon-46c47342b308d949e370e86db45ad1b54d496a9856e0ad49658508d0aa116ac9.scope\": RecentStats: unable to find data in memory cache]" Nov 26 00:23:08 crc kubenswrapper[4875]: I1126 00:23:08.745467 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" event={"ID":"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e","Type":"ContainerStarted","Data":"924f375e812a0719e7bc1f7b009dabf2f368d974e2fd9a554bfc2a38ef6023a5"} Nov 26 00:23:18 crc kubenswrapper[4875]: E1126 00:23:18.423480 4875 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d4fb5a7_3ade_467a_9640_d744b1ef0c8e.slice/crio-conmon-46c47342b308d949e370e86db45ad1b54d496a9856e0ad49658508d0aa116ac9.scope\": RecentStats: unable to find data in memory cache]" Nov 26 00:23:28 crc kubenswrapper[4875]: E1126 00:23:28.548773 4875 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d4fb5a7_3ade_467a_9640_d744b1ef0c8e.slice/crio-conmon-46c47342b308d949e370e86db45ad1b54d496a9856e0ad49658508d0aa116ac9.scope\": RecentStats: unable to find data in memory cache]" Nov 26 00:23:30 crc kubenswrapper[4875]: I1126 00:23:30.864161 4875 generic.go:334] "Generic (PLEG): container finished" podID="9c08b0e5-f81f-4f46-a1da-40a7dd548a6a" containerID="37ad6ae018d6137adf337a8506d92d1b9510c76dcb47367e4a97888642c477af" exitCode=0 Nov 26 00:23:30 crc kubenswrapper[4875]: I1126 00:23:30.864207 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="service-telemetry/service-telemetry-operator-2-build" event={"ID":"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a","Type":"ContainerDied","Data":"37ad6ae018d6137adf337a8506d92d1b9510c76dcb47367e4a97888642c477af"} Nov 26 00:23:32 crc kubenswrapper[4875]: I1126 00:23:32.086759 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="service-telemetry/service-telemetry-operator-2-build" Nov 26 00:23:32 crc kubenswrapper[4875]: I1126 00:23:32.155684 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-wjwcn\" (UniqueName: \"kubernetes.io/projected/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-kube-api-access-wjwcn\") pod \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " Nov 26 00:23:32 crc kubenswrapper[4875]: I1126 00:23:32.156166 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-build-blob-cache\") pod \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " Nov 26 00:23:32 crc kubenswrapper[4875]: I1126 00:23:32.164005 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-kube-api-access-wjwcn" (OuterVolumeSpecName: "kube-api-access-wjwcn") pod "9c08b0e5-f81f-4f46-a1da-40a7dd548a6a" (UID: "9c08b0e5-f81f-4f46-a1da-40a7dd548a6a"). InnerVolumeSpecName "kube-api-access-wjwcn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:23:32 crc kubenswrapper[4875]: I1126 00:23:32.257250 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-builder-dockercfg-5d5px-pull\") pod \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " Nov 26 00:23:32 crc kubenswrapper[4875]: I1126 00:23:32.257325 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-buildcachedir\") pod \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " Nov 26 00:23:32 crc kubenswrapper[4875]: I1126 00:23:32.257348 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-container-storage-run\") pod \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " Nov 26 00:23:32 crc kubenswrapper[4875]: I1126 00:23:32.257365 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-buildworkdir\") pod \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " Nov 26 00:23:32 crc kubenswrapper[4875]: I1126 00:23:32.257384 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-build-ca-bundles\") pod \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " Nov 26 00:23:32 crc kubenswrapper[4875]: I1126 00:23:32.257399 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-build-proxy-ca-bundles\") pod \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " Nov 26 00:23:32 crc kubenswrapper[4875]: I1126 00:23:32.257428 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-container-storage-root\") pod \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " Nov 26 00:23:32 crc kubenswrapper[4875]: I1126 00:23:32.257480 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-builder-dockercfg-5d5px-push\") pod \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " Nov 26 00:23:32 crc kubenswrapper[4875]: I1126 00:23:32.257495 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-node-pullsecrets\") pod \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " Nov 26 00:23:32 crc kubenswrapper[4875]: I1126 00:23:32.257509 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-build-system-configs\") pod \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\" (UID: \"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a\") " Nov 26 00:23:32 crc kubenswrapper[4875]: I1126 00:23:32.257697 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-wjwcn\" (UniqueName: \"kubernetes.io/projected/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-kube-api-access-wjwcn\") on node \"crc\" DevicePath \"\"" Nov 26 00:23:32 crc kubenswrapper[4875]: I1126 00:23:32.258267 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-node-pullsecrets" (OuterVolumeSpecName: "node-pullsecrets") pod "9c08b0e5-f81f-4f46-a1da-40a7dd548a6a" (UID: "9c08b0e5-f81f-4f46-a1da-40a7dd548a6a"). InnerVolumeSpecName "node-pullsecrets". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:23:32 crc kubenswrapper[4875]: I1126 00:23:32.258264 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-build-system-configs" (OuterVolumeSpecName: "build-system-configs") pod "9c08b0e5-f81f-4f46-a1da-40a7dd548a6a" (UID: "9c08b0e5-f81f-4f46-a1da-40a7dd548a6a"). InnerVolumeSpecName "build-system-configs". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:23:32 crc kubenswrapper[4875]: I1126 00:23:32.259294 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-build-ca-bundles" (OuterVolumeSpecName: "build-ca-bundles") pod "9c08b0e5-f81f-4f46-a1da-40a7dd548a6a" (UID: "9c08b0e5-f81f-4f46-a1da-40a7dd548a6a"). InnerVolumeSpecName "build-ca-bundles". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:23:32 crc kubenswrapper[4875]: I1126 00:23:32.259518 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-build-proxy-ca-bundles" (OuterVolumeSpecName: "build-proxy-ca-bundles") pod "9c08b0e5-f81f-4f46-a1da-40a7dd548a6a" (UID: "9c08b0e5-f81f-4f46-a1da-40a7dd548a6a"). InnerVolumeSpecName "build-proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:23:32 crc kubenswrapper[4875]: I1126 00:23:32.259647 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-buildcachedir" (OuterVolumeSpecName: "buildcachedir") pod "9c08b0e5-f81f-4f46-a1da-40a7dd548a6a" (UID: "9c08b0e5-f81f-4f46-a1da-40a7dd548a6a"). InnerVolumeSpecName "buildcachedir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:23:32 crc kubenswrapper[4875]: I1126 00:23:32.259888 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-container-storage-run" (OuterVolumeSpecName: "container-storage-run") pod "9c08b0e5-f81f-4f46-a1da-40a7dd548a6a" (UID: "9c08b0e5-f81f-4f46-a1da-40a7dd548a6a"). InnerVolumeSpecName "container-storage-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:23:32 crc kubenswrapper[4875]: I1126 00:23:32.281899 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-builder-dockercfg-5d5px-pull" (OuterVolumeSpecName: "builder-dockercfg-5d5px-pull") pod "9c08b0e5-f81f-4f46-a1da-40a7dd548a6a" (UID: "9c08b0e5-f81f-4f46-a1da-40a7dd548a6a"). InnerVolumeSpecName "builder-dockercfg-5d5px-pull". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:23:32 crc kubenswrapper[4875]: I1126 00:23:32.281910 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-builder-dockercfg-5d5px-push" (OuterVolumeSpecName: "builder-dockercfg-5d5px-push") pod "9c08b0e5-f81f-4f46-a1da-40a7dd548a6a" (UID: "9c08b0e5-f81f-4f46-a1da-40a7dd548a6a"). InnerVolumeSpecName "builder-dockercfg-5d5px-push". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:23:32 crc kubenswrapper[4875]: I1126 00:23:32.302722 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-buildworkdir" (OuterVolumeSpecName: "buildworkdir") pod "9c08b0e5-f81f-4f46-a1da-40a7dd548a6a" (UID: "9c08b0e5-f81f-4f46-a1da-40a7dd548a6a"). InnerVolumeSpecName "buildworkdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:23:32 crc kubenswrapper[4875]: I1126 00:23:32.349466 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-build-blob-cache" (OuterVolumeSpecName: "build-blob-cache") pod "9c08b0e5-f81f-4f46-a1da-40a7dd548a6a" (UID: "9c08b0e5-f81f-4f46-a1da-40a7dd548a6a"). InnerVolumeSpecName "build-blob-cache". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:23:32 crc kubenswrapper[4875]: I1126 00:23:32.359112 4875 reconciler_common.go:293] "Volume detached for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-build-blob-cache\") on node \"crc\" DevicePath \"\"" Nov 26 00:23:32 crc kubenswrapper[4875]: I1126 00:23:32.359152 4875 reconciler_common.go:293] "Volume detached for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-builder-dockercfg-5d5px-push\") on node \"crc\" DevicePath \"\"" Nov 26 00:23:32 crc kubenswrapper[4875]: I1126 00:23:32.359166 4875 reconciler_common.go:293] "Volume detached for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-node-pullsecrets\") on node \"crc\" DevicePath \"\"" Nov 26 00:23:32 crc kubenswrapper[4875]: I1126 00:23:32.359177 4875 reconciler_common.go:293] "Volume detached for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-build-system-configs\") on node \"crc\" DevicePath \"\"" Nov 26 00:23:32 crc kubenswrapper[4875]: I1126 00:23:32.359188 4875 reconciler_common.go:293] "Volume detached for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-builder-dockercfg-5d5px-pull\") on node \"crc\" DevicePath \"\"" Nov 26 00:23:32 crc kubenswrapper[4875]: I1126 00:23:32.359200 4875 reconciler_common.go:293] "Volume detached for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-buildcachedir\") on node \"crc\" DevicePath \"\"" Nov 26 00:23:32 crc kubenswrapper[4875]: I1126 00:23:32.359210 4875 reconciler_common.go:293] "Volume detached for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-container-storage-run\") on node \"crc\" DevicePath \"\"" Nov 26 00:23:32 crc kubenswrapper[4875]: I1126 00:23:32.359221 4875 reconciler_common.go:293] "Volume detached for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-buildworkdir\") on node \"crc\" DevicePath \"\"" Nov 26 00:23:32 crc kubenswrapper[4875]: I1126 00:23:32.359232 4875 reconciler_common.go:293] "Volume detached for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-build-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 26 00:23:32 crc kubenswrapper[4875]: I1126 00:23:32.359243 4875 reconciler_common.go:293] "Volume detached for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-build-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 26 00:23:32 crc kubenswrapper[4875]: I1126 00:23:32.879745 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/service-telemetry-operator-2-build" event={"ID":"9c08b0e5-f81f-4f46-a1da-40a7dd548a6a","Type":"ContainerDied","Data":"a10558d247611a8dc9e82a62403989b2c8f2eea39f787f7329ec279336e6ae92"} Nov 26 00:23:32 crc kubenswrapper[4875]: I1126 00:23:32.879790 4875 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="a10558d247611a8dc9e82a62403989b2c8f2eea39f787f7329ec279336e6ae92" Nov 26 00:23:32 crc kubenswrapper[4875]: I1126 00:23:32.879860 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="service-telemetry/service-telemetry-operator-2-build" Nov 26 00:23:34 crc kubenswrapper[4875]: I1126 00:23:34.267231 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-container-storage-root" (OuterVolumeSpecName: "container-storage-root") pod "9c08b0e5-f81f-4f46-a1da-40a7dd548a6a" (UID: "9c08b0e5-f81f-4f46-a1da-40a7dd548a6a"). InnerVolumeSpecName "container-storage-root". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:23:34 crc kubenswrapper[4875]: I1126 00:23:34.281524 4875 reconciler_common.go:293] "Volume detached for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/9c08b0e5-f81f-4f46-a1da-40a7dd548a6a-container-storage-root\") on node \"crc\" DevicePath \"\"" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.205837 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["service-telemetry/smart-gateway-operator-1-build"] Nov 26 00:23:37 crc kubenswrapper[4875]: E1126 00:23:37.206394 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c08b0e5-f81f-4f46-a1da-40a7dd548a6a" containerName="manage-dockerfile" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.206428 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c08b0e5-f81f-4f46-a1da-40a7dd548a6a" containerName="manage-dockerfile" Nov 26 00:23:37 crc kubenswrapper[4875]: E1126 00:23:37.206444 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c08b0e5-f81f-4f46-a1da-40a7dd548a6a" containerName="git-clone" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.206452 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c08b0e5-f81f-4f46-a1da-40a7dd548a6a" containerName="git-clone" Nov 26 00:23:37 crc kubenswrapper[4875]: E1126 00:23:37.206465 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="9c08b0e5-f81f-4f46-a1da-40a7dd548a6a" containerName="docker-build" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.206474 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="9c08b0e5-f81f-4f46-a1da-40a7dd548a6a" containerName="docker-build" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.206602 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="9c08b0e5-f81f-4f46-a1da-40a7dd548a6a" containerName="docker-build" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.207185 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="service-telemetry/smart-gateway-operator-1-build" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.209134 4875 reflector.go:368] Caches populated for *v1.Secret from object-"service-telemetry"/"builder-dockercfg-5d5px" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.209358 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"service-telemetry"/"smart-gateway-operator-1-global-ca" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.210075 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"service-telemetry"/"smart-gateway-operator-1-ca" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.210327 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"service-telemetry"/"smart-gateway-operator-1-sys-config" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.232392 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["service-telemetry/smart-gateway-operator-1-build"] Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.325035 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/36bca648-f3d3-429a-8549-d02890cbf334-container-storage-run\") pod \"smart-gateway-operator-1-build\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " pod="service-telemetry/smart-gateway-operator-1-build" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.325080 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/36bca648-f3d3-429a-8549-d02890cbf334-buildworkdir\") pod \"smart-gateway-operator-1-build\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " pod="service-telemetry/smart-gateway-operator-1-build" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.325104 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/36bca648-f3d3-429a-8549-d02890cbf334-build-ca-bundles\") pod \"smart-gateway-operator-1-build\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " pod="service-telemetry/smart-gateway-operator-1-build" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.325122 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-mp2f7\" (UniqueName: \"kubernetes.io/projected/36bca648-f3d3-429a-8549-d02890cbf334-kube-api-access-mp2f7\") pod \"smart-gateway-operator-1-build\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " pod="service-telemetry/smart-gateway-operator-1-build" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.325143 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/36bca648-f3d3-429a-8549-d02890cbf334-build-system-configs\") pod \"smart-gateway-operator-1-build\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " pod="service-telemetry/smart-gateway-operator-1-build" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.325162 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/36bca648-f3d3-429a-8549-d02890cbf334-build-blob-cache\") pod \"smart-gateway-operator-1-build\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " 
pod="service-telemetry/smart-gateway-operator-1-build" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.325197 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/36bca648-f3d3-429a-8549-d02890cbf334-buildcachedir\") pod \"smart-gateway-operator-1-build\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " pod="service-telemetry/smart-gateway-operator-1-build" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.325214 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/36bca648-f3d3-429a-8549-d02890cbf334-container-storage-root\") pod \"smart-gateway-operator-1-build\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " pod="service-telemetry/smart-gateway-operator-1-build" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.325764 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/36bca648-f3d3-429a-8549-d02890cbf334-node-pullsecrets\") pod \"smart-gateway-operator-1-build\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " pod="service-telemetry/smart-gateway-operator-1-build" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.325929 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/36bca648-f3d3-429a-8549-d02890cbf334-build-proxy-ca-bundles\") pod \"smart-gateway-operator-1-build\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " pod="service-telemetry/smart-gateway-operator-1-build" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.326079 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/36bca648-f3d3-429a-8549-d02890cbf334-builder-dockercfg-5d5px-push\") pod \"smart-gateway-operator-1-build\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " pod="service-telemetry/smart-gateway-operator-1-build" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.326136 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/36bca648-f3d3-429a-8549-d02890cbf334-builder-dockercfg-5d5px-pull\") pod \"smart-gateway-operator-1-build\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " pod="service-telemetry/smart-gateway-operator-1-build" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.427146 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/36bca648-f3d3-429a-8549-d02890cbf334-build-ca-bundles\") pod \"smart-gateway-operator-1-build\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " pod="service-telemetry/smart-gateway-operator-1-build" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.427206 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-mp2f7\" (UniqueName: \"kubernetes.io/projected/36bca648-f3d3-429a-8549-d02890cbf334-kube-api-access-mp2f7\") pod \"smart-gateway-operator-1-build\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " pod="service-telemetry/smart-gateway-operator-1-build" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.427227 4875 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/36bca648-f3d3-429a-8549-d02890cbf334-build-system-configs\") pod \"smart-gateway-operator-1-build\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " pod="service-telemetry/smart-gateway-operator-1-build" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.427245 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/36bca648-f3d3-429a-8549-d02890cbf334-build-blob-cache\") pod \"smart-gateway-operator-1-build\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " pod="service-telemetry/smart-gateway-operator-1-build" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.427277 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/36bca648-f3d3-429a-8549-d02890cbf334-buildcachedir\") pod \"smart-gateway-operator-1-build\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " pod="service-telemetry/smart-gateway-operator-1-build" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.427294 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/36bca648-f3d3-429a-8549-d02890cbf334-container-storage-root\") pod \"smart-gateway-operator-1-build\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " pod="service-telemetry/smart-gateway-operator-1-build" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.427314 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/36bca648-f3d3-429a-8549-d02890cbf334-node-pullsecrets\") pod \"smart-gateway-operator-1-build\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " pod="service-telemetry/smart-gateway-operator-1-build" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.427338 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/36bca648-f3d3-429a-8549-d02890cbf334-build-proxy-ca-bundles\") pod \"smart-gateway-operator-1-build\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " pod="service-telemetry/smart-gateway-operator-1-build" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.427369 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/36bca648-f3d3-429a-8549-d02890cbf334-builder-dockercfg-5d5px-push\") pod \"smart-gateway-operator-1-build\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " pod="service-telemetry/smart-gateway-operator-1-build" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.427385 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/36bca648-f3d3-429a-8549-d02890cbf334-builder-dockercfg-5d5px-pull\") pod \"smart-gateway-operator-1-build\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " pod="service-telemetry/smart-gateway-operator-1-build" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.427428 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/36bca648-f3d3-429a-8549-d02890cbf334-container-storage-run\") pod \"smart-gateway-operator-1-build\" 
(UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " pod="service-telemetry/smart-gateway-operator-1-build" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.427444 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/36bca648-f3d3-429a-8549-d02890cbf334-buildworkdir\") pod \"smart-gateway-operator-1-build\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " pod="service-telemetry/smart-gateway-operator-1-build" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.427692 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/36bca648-f3d3-429a-8549-d02890cbf334-buildcachedir\") pod \"smart-gateway-operator-1-build\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " pod="service-telemetry/smart-gateway-operator-1-build" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.427798 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/36bca648-f3d3-429a-8549-d02890cbf334-node-pullsecrets\") pod \"smart-gateway-operator-1-build\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " pod="service-telemetry/smart-gateway-operator-1-build" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.427806 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/36bca648-f3d3-429a-8549-d02890cbf334-build-blob-cache\") pod \"smart-gateway-operator-1-build\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " pod="service-telemetry/smart-gateway-operator-1-build" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.427934 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/36bca648-f3d3-429a-8549-d02890cbf334-container-storage-root\") pod \"smart-gateway-operator-1-build\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " pod="service-telemetry/smart-gateway-operator-1-build" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.428142 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/36bca648-f3d3-429a-8549-d02890cbf334-buildworkdir\") pod \"smart-gateway-operator-1-build\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " pod="service-telemetry/smart-gateway-operator-1-build" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.428308 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/36bca648-f3d3-429a-8549-d02890cbf334-build-ca-bundles\") pod \"smart-gateway-operator-1-build\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " pod="service-telemetry/smart-gateway-operator-1-build" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.428313 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/36bca648-f3d3-429a-8549-d02890cbf334-build-system-configs\") pod \"smart-gateway-operator-1-build\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " pod="service-telemetry/smart-gateway-operator-1-build" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.442902 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/36bca648-f3d3-429a-8549-d02890cbf334-build-proxy-ca-bundles\") 
pod \"smart-gateway-operator-1-build\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " pod="service-telemetry/smart-gateway-operator-1-build" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.873674 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/36bca648-f3d3-429a-8549-d02890cbf334-container-storage-run\") pod \"smart-gateway-operator-1-build\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " pod="service-telemetry/smart-gateway-operator-1-build" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.874486 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/36bca648-f3d3-429a-8549-d02890cbf334-builder-dockercfg-5d5px-pull\") pod \"smart-gateway-operator-1-build\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " pod="service-telemetry/smart-gateway-operator-1-build" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.874483 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/36bca648-f3d3-429a-8549-d02890cbf334-builder-dockercfg-5d5px-push\") pod \"smart-gateway-operator-1-build\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " pod="service-telemetry/smart-gateway-operator-1-build" Nov 26 00:23:37 crc kubenswrapper[4875]: I1126 00:23:37.877169 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-mp2f7\" (UniqueName: \"kubernetes.io/projected/36bca648-f3d3-429a-8549-d02890cbf334-kube-api-access-mp2f7\") pod \"smart-gateway-operator-1-build\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " pod="service-telemetry/smart-gateway-operator-1-build" Nov 26 00:23:38 crc kubenswrapper[4875]: I1126 00:23:38.122039 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="service-telemetry/smart-gateway-operator-1-build" Nov 26 00:23:38 crc kubenswrapper[4875]: I1126 00:23:38.552165 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["service-telemetry/smart-gateway-operator-1-build"] Nov 26 00:23:38 crc kubenswrapper[4875]: E1126 00:23:38.691316 4875 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d4fb5a7_3ade_467a_9640_d744b1ef0c8e.slice/crio-conmon-46c47342b308d949e370e86db45ad1b54d496a9856e0ad49658508d0aa116ac9.scope\": RecentStats: unable to find data in memory cache]" Nov 26 00:23:38 crc kubenswrapper[4875]: I1126 00:23:38.914757 4875 generic.go:334] "Generic (PLEG): container finished" podID="36bca648-f3d3-429a-8549-d02890cbf334" containerID="f9993430c3684be8b9ffd4e0bd56f59e11f0ea68c9a5fbf75ec00aeb1a6ef7c4" exitCode=0 Nov 26 00:23:38 crc kubenswrapper[4875]: I1126 00:23:38.914904 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/smart-gateway-operator-1-build" event={"ID":"36bca648-f3d3-429a-8549-d02890cbf334","Type":"ContainerDied","Data":"f9993430c3684be8b9ffd4e0bd56f59e11f0ea68c9a5fbf75ec00aeb1a6ef7c4"} Nov 26 00:23:38 crc kubenswrapper[4875]: I1126 00:23:38.915047 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/smart-gateway-operator-1-build" event={"ID":"36bca648-f3d3-429a-8549-d02890cbf334","Type":"ContainerStarted","Data":"e36c33b499e2cd2025008dcae4abe8200a452312988245228a22734571090b3c"} Nov 26 00:23:39 crc kubenswrapper[4875]: I1126 00:23:39.921722 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/smart-gateway-operator-1-build" event={"ID":"36bca648-f3d3-429a-8549-d02890cbf334","Type":"ContainerStarted","Data":"51179a8dde76acdf1d4a22a3b47bebcde3afb99146dad3931882ebe63b6bfd9b"} Nov 26 00:23:39 crc kubenswrapper[4875]: I1126 00:23:39.946866 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="service-telemetry/smart-gateway-operator-1-build" podStartSLOduration=2.946849961 podStartE2EDuration="2.946849961s" podCreationTimestamp="2025-11-26 00:23:37 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:23:39.944704692 +0000 UTC m=+993.134680407" watchObservedRunningTime="2025-11-26 00:23:39.946849961 +0000 UTC m=+993.136825656" Nov 26 00:23:47 crc kubenswrapper[4875]: I1126 00:23:47.717034 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["service-telemetry/smart-gateway-operator-1-build"] Nov 26 00:23:47 crc kubenswrapper[4875]: I1126 00:23:47.719081 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="service-telemetry/smart-gateway-operator-1-build" podUID="36bca648-f3d3-429a-8549-d02890cbf334" containerName="docker-build" containerID="cri-o://51179a8dde76acdf1d4a22a3b47bebcde3afb99146dad3931882ebe63b6bfd9b" gracePeriod=30 Nov 26 00:23:48 crc kubenswrapper[4875]: E1126 00:23:48.814856 4875 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d4fb5a7_3ade_467a_9640_d744b1ef0c8e.slice/crio-conmon-46c47342b308d949e370e86db45ad1b54d496a9856e0ad49658508d0aa116ac9.scope\": RecentStats: unable to find data in memory cache]" Nov 26 00:23:49 crc kubenswrapper[4875]: I1126 00:23:49.401251 4875 kubelet.go:2421] "SyncLoop ADD" 
source="api" pods=["service-telemetry/smart-gateway-operator-2-build"] Nov 26 00:23:49 crc kubenswrapper[4875]: I1126 00:23:49.402333 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="service-telemetry/smart-gateway-operator-2-build" Nov 26 00:23:49 crc kubenswrapper[4875]: I1126 00:23:49.405764 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"service-telemetry"/"smart-gateway-operator-2-ca" Nov 26 00:23:49 crc kubenswrapper[4875]: I1126 00:23:49.406015 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"service-telemetry"/"smart-gateway-operator-2-sys-config" Nov 26 00:23:49 crc kubenswrapper[4875]: I1126 00:23:49.411129 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"service-telemetry"/"smart-gateway-operator-2-global-ca" Nov 26 00:23:49 crc kubenswrapper[4875]: I1126 00:23:49.424191 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["service-telemetry/smart-gateway-operator-2-build"] Nov 26 00:23:49 crc kubenswrapper[4875]: I1126 00:23:49.581314 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8490ed0f-10c5-4aa5-9994-34886756816a-build-proxy-ca-bundles\") pod \"smart-gateway-operator-2-build\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " pod="service-telemetry/smart-gateway-operator-2-build" Nov 26 00:23:49 crc kubenswrapper[4875]: I1126 00:23:49.581370 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/8490ed0f-10c5-4aa5-9994-34886756816a-container-storage-run\") pod \"smart-gateway-operator-2-build\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " pod="service-telemetry/smart-gateway-operator-2-build" Nov 26 00:23:49 crc kubenswrapper[4875]: I1126 00:23:49.581476 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/8490ed0f-10c5-4aa5-9994-34886756816a-build-blob-cache\") pod \"smart-gateway-operator-2-build\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " pod="service-telemetry/smart-gateway-operator-2-build" Nov 26 00:23:49 crc kubenswrapper[4875]: I1126 00:23:49.581506 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-gnfz4\" (UniqueName: \"kubernetes.io/projected/8490ed0f-10c5-4aa5-9994-34886756816a-kube-api-access-gnfz4\") pod \"smart-gateway-operator-2-build\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " pod="service-telemetry/smart-gateway-operator-2-build" Nov 26 00:23:49 crc kubenswrapper[4875]: I1126 00:23:49.581532 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8490ed0f-10c5-4aa5-9994-34886756816a-build-ca-bundles\") pod \"smart-gateway-operator-2-build\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " pod="service-telemetry/smart-gateway-operator-2-build" Nov 26 00:23:49 crc kubenswrapper[4875]: I1126 00:23:49.581566 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/8490ed0f-10c5-4aa5-9994-34886756816a-node-pullsecrets\") pod \"smart-gateway-operator-2-build\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " 
pod="service-telemetry/smart-gateway-operator-2-build" Nov 26 00:23:49 crc kubenswrapper[4875]: I1126 00:23:49.581587 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/8490ed0f-10c5-4aa5-9994-34886756816a-builder-dockercfg-5d5px-push\") pod \"smart-gateway-operator-2-build\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " pod="service-telemetry/smart-gateway-operator-2-build" Nov 26 00:23:49 crc kubenswrapper[4875]: I1126 00:23:49.581612 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/8490ed0f-10c5-4aa5-9994-34886756816a-builder-dockercfg-5d5px-pull\") pod \"smart-gateway-operator-2-build\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " pod="service-telemetry/smart-gateway-operator-2-build" Nov 26 00:23:49 crc kubenswrapper[4875]: I1126 00:23:49.581634 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/8490ed0f-10c5-4aa5-9994-34886756816a-container-storage-root\") pod \"smart-gateway-operator-2-build\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " pod="service-telemetry/smart-gateway-operator-2-build" Nov 26 00:23:49 crc kubenswrapper[4875]: I1126 00:23:49.581666 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/8490ed0f-10c5-4aa5-9994-34886756816a-buildworkdir\") pod \"smart-gateway-operator-2-build\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " pod="service-telemetry/smart-gateway-operator-2-build" Nov 26 00:23:49 crc kubenswrapper[4875]: I1126 00:23:49.581778 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/8490ed0f-10c5-4aa5-9994-34886756816a-buildcachedir\") pod \"smart-gateway-operator-2-build\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " pod="service-telemetry/smart-gateway-operator-2-build" Nov 26 00:23:49 crc kubenswrapper[4875]: I1126 00:23:49.581812 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/8490ed0f-10c5-4aa5-9994-34886756816a-build-system-configs\") pod \"smart-gateway-operator-2-build\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " pod="service-telemetry/smart-gateway-operator-2-build" Nov 26 00:23:49 crc kubenswrapper[4875]: I1126 00:23:49.682661 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/8490ed0f-10c5-4aa5-9994-34886756816a-node-pullsecrets\") pod \"smart-gateway-operator-2-build\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " pod="service-telemetry/smart-gateway-operator-2-build" Nov 26 00:23:49 crc kubenswrapper[4875]: I1126 00:23:49.682704 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/8490ed0f-10c5-4aa5-9994-34886756816a-builder-dockercfg-5d5px-push\") pod \"smart-gateway-operator-2-build\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " pod="service-telemetry/smart-gateway-operator-2-build" Nov 26 00:23:49 crc kubenswrapper[4875]: I1126 00:23:49.682723 4875 
reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/8490ed0f-10c5-4aa5-9994-34886756816a-builder-dockercfg-5d5px-pull\") pod \"smart-gateway-operator-2-build\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " pod="service-telemetry/smart-gateway-operator-2-build" Nov 26 00:23:49 crc kubenswrapper[4875]: I1126 00:23:49.682740 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/8490ed0f-10c5-4aa5-9994-34886756816a-container-storage-root\") pod \"smart-gateway-operator-2-build\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " pod="service-telemetry/smart-gateway-operator-2-build" Nov 26 00:23:49 crc kubenswrapper[4875]: I1126 00:23:49.682766 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/8490ed0f-10c5-4aa5-9994-34886756816a-buildworkdir\") pod \"smart-gateway-operator-2-build\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " pod="service-telemetry/smart-gateway-operator-2-build" Nov 26 00:23:49 crc kubenswrapper[4875]: I1126 00:23:49.682802 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/8490ed0f-10c5-4aa5-9994-34886756816a-node-pullsecrets\") pod \"smart-gateway-operator-2-build\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " pod="service-telemetry/smart-gateway-operator-2-build" Nov 26 00:23:49 crc kubenswrapper[4875]: I1126 00:23:49.682816 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/8490ed0f-10c5-4aa5-9994-34886756816a-buildcachedir\") pod \"smart-gateway-operator-2-build\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " pod="service-telemetry/smart-gateway-operator-2-build" Nov 26 00:23:49 crc kubenswrapper[4875]: I1126 00:23:49.682832 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/8490ed0f-10c5-4aa5-9994-34886756816a-build-system-configs\") pod \"smart-gateway-operator-2-build\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " pod="service-telemetry/smart-gateway-operator-2-build" Nov 26 00:23:49 crc kubenswrapper[4875]: I1126 00:23:49.682991 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/8490ed0f-10c5-4aa5-9994-34886756816a-buildcachedir\") pod \"smart-gateway-operator-2-build\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " pod="service-telemetry/smart-gateway-operator-2-build" Nov 26 00:23:49 crc kubenswrapper[4875]: I1126 00:23:49.683061 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8490ed0f-10c5-4aa5-9994-34886756816a-build-proxy-ca-bundles\") pod \"smart-gateway-operator-2-build\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " pod="service-telemetry/smart-gateway-operator-2-build" Nov 26 00:23:49 crc kubenswrapper[4875]: I1126 00:23:49.683124 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/8490ed0f-10c5-4aa5-9994-34886756816a-container-storage-run\") pod \"smart-gateway-operator-2-build\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " 
pod="service-telemetry/smart-gateway-operator-2-build" Nov 26 00:23:49 crc kubenswrapper[4875]: I1126 00:23:49.683163 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/8490ed0f-10c5-4aa5-9994-34886756816a-build-blob-cache\") pod \"smart-gateway-operator-2-build\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " pod="service-telemetry/smart-gateway-operator-2-build" Nov 26 00:23:49 crc kubenswrapper[4875]: I1126 00:23:49.683201 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-gnfz4\" (UniqueName: \"kubernetes.io/projected/8490ed0f-10c5-4aa5-9994-34886756816a-kube-api-access-gnfz4\") pod \"smart-gateway-operator-2-build\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " pod="service-telemetry/smart-gateway-operator-2-build" Nov 26 00:23:49 crc kubenswrapper[4875]: I1126 00:23:49.683219 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8490ed0f-10c5-4aa5-9994-34886756816a-build-ca-bundles\") pod \"smart-gateway-operator-2-build\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " pod="service-telemetry/smart-gateway-operator-2-build" Nov 26 00:23:49 crc kubenswrapper[4875]: I1126 00:23:49.683299 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/8490ed0f-10c5-4aa5-9994-34886756816a-container-storage-root\") pod \"smart-gateway-operator-2-build\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " pod="service-telemetry/smart-gateway-operator-2-build" Nov 26 00:23:49 crc kubenswrapper[4875]: I1126 00:23:49.683459 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/8490ed0f-10c5-4aa5-9994-34886756816a-buildworkdir\") pod \"smart-gateway-operator-2-build\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " pod="service-telemetry/smart-gateway-operator-2-build" Nov 26 00:23:49 crc kubenswrapper[4875]: I1126 00:23:49.683550 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/8490ed0f-10c5-4aa5-9994-34886756816a-build-blob-cache\") pod \"smart-gateway-operator-2-build\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " pod="service-telemetry/smart-gateway-operator-2-build" Nov 26 00:23:49 crc kubenswrapper[4875]: I1126 00:23:49.683873 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/8490ed0f-10c5-4aa5-9994-34886756816a-container-storage-run\") pod \"smart-gateway-operator-2-build\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " pod="service-telemetry/smart-gateway-operator-2-build" Nov 26 00:23:49 crc kubenswrapper[4875]: I1126 00:23:49.683992 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/8490ed0f-10c5-4aa5-9994-34886756816a-build-system-configs\") pod \"smart-gateway-operator-2-build\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " pod="service-telemetry/smart-gateway-operator-2-build" Nov 26 00:23:49 crc kubenswrapper[4875]: I1126 00:23:49.684323 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8490ed0f-10c5-4aa5-9994-34886756816a-build-proxy-ca-bundles\") 
pod \"smart-gateway-operator-2-build\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " pod="service-telemetry/smart-gateway-operator-2-build" Nov 26 00:23:49 crc kubenswrapper[4875]: I1126 00:23:49.684351 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8490ed0f-10c5-4aa5-9994-34886756816a-build-ca-bundles\") pod \"smart-gateway-operator-2-build\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " pod="service-telemetry/smart-gateway-operator-2-build" Nov 26 00:23:49 crc kubenswrapper[4875]: I1126 00:23:49.690845 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/8490ed0f-10c5-4aa5-9994-34886756816a-builder-dockercfg-5d5px-push\") pod \"smart-gateway-operator-2-build\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " pod="service-telemetry/smart-gateway-operator-2-build" Nov 26 00:23:49 crc kubenswrapper[4875]: I1126 00:23:49.690895 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/8490ed0f-10c5-4aa5-9994-34886756816a-builder-dockercfg-5d5px-pull\") pod \"smart-gateway-operator-2-build\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " pod="service-telemetry/smart-gateway-operator-2-build" Nov 26 00:23:49 crc kubenswrapper[4875]: I1126 00:23:49.698927 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-gnfz4\" (UniqueName: \"kubernetes.io/projected/8490ed0f-10c5-4aa5-9994-34886756816a-kube-api-access-gnfz4\") pod \"smart-gateway-operator-2-build\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " pod="service-telemetry/smart-gateway-operator-2-build" Nov 26 00:23:49 crc kubenswrapper[4875]: I1126 00:23:49.722673 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="service-telemetry/smart-gateway-operator-2-build" Nov 26 00:23:50 crc kubenswrapper[4875]: I1126 00:23:50.146901 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["service-telemetry/smart-gateway-operator-2-build"] Nov 26 00:23:50 crc kubenswrapper[4875]: I1126 00:23:50.975498 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/smart-gateway-operator-2-build" event={"ID":"8490ed0f-10c5-4aa5-9994-34886756816a","Type":"ContainerStarted","Data":"afba9b989ea4f7465afaaf4aea1c792dc625eb4eb64f36f56b0016ecc3fe59fb"} Nov 26 00:23:50 crc kubenswrapper[4875]: I1126 00:23:50.975780 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/smart-gateway-operator-2-build" event={"ID":"8490ed0f-10c5-4aa5-9994-34886756816a","Type":"ContainerStarted","Data":"51e7a762ae4764299df07946c4f463b16c96c25438e8e366695ffa26df931ba7"} Nov 26 00:23:51 crc kubenswrapper[4875]: I1126 00:23:51.983328 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/service-telemetry_smart-gateway-operator-1-build_36bca648-f3d3-429a-8549-d02890cbf334/docker-build/0.log" Nov 26 00:23:51 crc kubenswrapper[4875]: I1126 00:23:51.983949 4875 generic.go:334] "Generic (PLEG): container finished" podID="36bca648-f3d3-429a-8549-d02890cbf334" containerID="51179a8dde76acdf1d4a22a3b47bebcde3afb99146dad3931882ebe63b6bfd9b" exitCode=1 Nov 26 00:23:51 crc kubenswrapper[4875]: I1126 00:23:51.983981 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/smart-gateway-operator-1-build" event={"ID":"36bca648-f3d3-429a-8549-d02890cbf334","Type":"ContainerDied","Data":"51179a8dde76acdf1d4a22a3b47bebcde3afb99146dad3931882ebe63b6bfd9b"} Nov 26 00:23:52 crc kubenswrapper[4875]: I1126 00:23:52.387683 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/service-telemetry_smart-gateway-operator-1-build_36bca648-f3d3-429a-8549-d02890cbf334/docker-build/0.log" Nov 26 00:23:52 crc kubenswrapper[4875]: I1126 00:23:52.388002 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="service-telemetry/smart-gateway-operator-1-build" Nov 26 00:23:52 crc kubenswrapper[4875]: I1126 00:23:52.518644 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-mp2f7\" (UniqueName: \"kubernetes.io/projected/36bca648-f3d3-429a-8549-d02890cbf334-kube-api-access-mp2f7\") pod \"36bca648-f3d3-429a-8549-d02890cbf334\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " Nov 26 00:23:52 crc kubenswrapper[4875]: I1126 00:23:52.518736 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/36bca648-f3d3-429a-8549-d02890cbf334-builder-dockercfg-5d5px-push\") pod \"36bca648-f3d3-429a-8549-d02890cbf334\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " Nov 26 00:23:52 crc kubenswrapper[4875]: I1126 00:23:52.518806 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/36bca648-f3d3-429a-8549-d02890cbf334-container-storage-run\") pod \"36bca648-f3d3-429a-8549-d02890cbf334\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " Nov 26 00:23:52 crc kubenswrapper[4875]: I1126 00:23:52.518828 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/36bca648-f3d3-429a-8549-d02890cbf334-buildworkdir\") pod \"36bca648-f3d3-429a-8549-d02890cbf334\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " Nov 26 00:23:52 crc kubenswrapper[4875]: I1126 00:23:52.518902 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/36bca648-f3d3-429a-8549-d02890cbf334-builder-dockercfg-5d5px-pull\") pod \"36bca648-f3d3-429a-8549-d02890cbf334\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " Nov 26 00:23:52 crc kubenswrapper[4875]: I1126 00:23:52.518932 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/36bca648-f3d3-429a-8549-d02890cbf334-container-storage-root\") pod \"36bca648-f3d3-429a-8549-d02890cbf334\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " Nov 26 00:23:52 crc kubenswrapper[4875]: I1126 00:23:52.518955 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/36bca648-f3d3-429a-8549-d02890cbf334-build-system-configs\") pod \"36bca648-f3d3-429a-8549-d02890cbf334\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " Nov 26 00:23:52 crc kubenswrapper[4875]: I1126 00:23:52.518992 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/36bca648-f3d3-429a-8549-d02890cbf334-node-pullsecrets\") pod \"36bca648-f3d3-429a-8549-d02890cbf334\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " Nov 26 00:23:52 crc kubenswrapper[4875]: I1126 00:23:52.519017 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/36bca648-f3d3-429a-8549-d02890cbf334-build-proxy-ca-bundles\") pod \"36bca648-f3d3-429a-8549-d02890cbf334\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " Nov 26 00:23:52 crc kubenswrapper[4875]: I1126 00:23:52.519045 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"build-blob-cache\" 
(UniqueName: \"kubernetes.io/empty-dir/36bca648-f3d3-429a-8549-d02890cbf334-build-blob-cache\") pod \"36bca648-f3d3-429a-8549-d02890cbf334\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " Nov 26 00:23:52 crc kubenswrapper[4875]: I1126 00:23:52.519073 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/36bca648-f3d3-429a-8549-d02890cbf334-buildcachedir\") pod \"36bca648-f3d3-429a-8549-d02890cbf334\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " Nov 26 00:23:52 crc kubenswrapper[4875]: I1126 00:23:52.519083 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/36bca648-f3d3-429a-8549-d02890cbf334-node-pullsecrets" (OuterVolumeSpecName: "node-pullsecrets") pod "36bca648-f3d3-429a-8549-d02890cbf334" (UID: "36bca648-f3d3-429a-8549-d02890cbf334"). InnerVolumeSpecName "node-pullsecrets". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:23:52 crc kubenswrapper[4875]: I1126 00:23:52.519096 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/36bca648-f3d3-429a-8549-d02890cbf334-build-ca-bundles\") pod \"36bca648-f3d3-429a-8549-d02890cbf334\" (UID: \"36bca648-f3d3-429a-8549-d02890cbf334\") " Nov 26 00:23:52 crc kubenswrapper[4875]: I1126 00:23:52.519407 4875 reconciler_common.go:293] "Volume detached for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/36bca648-f3d3-429a-8549-d02890cbf334-node-pullsecrets\") on node \"crc\" DevicePath \"\"" Nov 26 00:23:52 crc kubenswrapper[4875]: I1126 00:23:52.519704 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/36bca648-f3d3-429a-8549-d02890cbf334-buildworkdir" (OuterVolumeSpecName: "buildworkdir") pod "36bca648-f3d3-429a-8549-d02890cbf334" (UID: "36bca648-f3d3-429a-8549-d02890cbf334"). InnerVolumeSpecName "buildworkdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:23:52 crc kubenswrapper[4875]: I1126 00:23:52.519741 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/36bca648-f3d3-429a-8549-d02890cbf334-buildcachedir" (OuterVolumeSpecName: "buildcachedir") pod "36bca648-f3d3-429a-8549-d02890cbf334" (UID: "36bca648-f3d3-429a-8549-d02890cbf334"). InnerVolumeSpecName "buildcachedir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:23:52 crc kubenswrapper[4875]: I1126 00:23:52.519883 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/36bca648-f3d3-429a-8549-d02890cbf334-build-proxy-ca-bundles" (OuterVolumeSpecName: "build-proxy-ca-bundles") pod "36bca648-f3d3-429a-8549-d02890cbf334" (UID: "36bca648-f3d3-429a-8549-d02890cbf334"). InnerVolumeSpecName "build-proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:23:52 crc kubenswrapper[4875]: I1126 00:23:52.519917 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/36bca648-f3d3-429a-8549-d02890cbf334-build-system-configs" (OuterVolumeSpecName: "build-system-configs") pod "36bca648-f3d3-429a-8549-d02890cbf334" (UID: "36bca648-f3d3-429a-8549-d02890cbf334"). InnerVolumeSpecName "build-system-configs". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:23:52 crc kubenswrapper[4875]: I1126 00:23:52.519930 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/36bca648-f3d3-429a-8549-d02890cbf334-build-ca-bundles" (OuterVolumeSpecName: "build-ca-bundles") pod "36bca648-f3d3-429a-8549-d02890cbf334" (UID: "36bca648-f3d3-429a-8549-d02890cbf334"). InnerVolumeSpecName "build-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:23:52 crc kubenswrapper[4875]: I1126 00:23:52.520280 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/36bca648-f3d3-429a-8549-d02890cbf334-container-storage-run" (OuterVolumeSpecName: "container-storage-run") pod "36bca648-f3d3-429a-8549-d02890cbf334" (UID: "36bca648-f3d3-429a-8549-d02890cbf334"). InnerVolumeSpecName "container-storage-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:23:52 crc kubenswrapper[4875]: I1126 00:23:52.524032 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/36bca648-f3d3-429a-8549-d02890cbf334-kube-api-access-mp2f7" (OuterVolumeSpecName: "kube-api-access-mp2f7") pod "36bca648-f3d3-429a-8549-d02890cbf334" (UID: "36bca648-f3d3-429a-8549-d02890cbf334"). InnerVolumeSpecName "kube-api-access-mp2f7". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:23:52 crc kubenswrapper[4875]: I1126 00:23:52.524045 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/36bca648-f3d3-429a-8549-d02890cbf334-builder-dockercfg-5d5px-pull" (OuterVolumeSpecName: "builder-dockercfg-5d5px-pull") pod "36bca648-f3d3-429a-8549-d02890cbf334" (UID: "36bca648-f3d3-429a-8549-d02890cbf334"). InnerVolumeSpecName "builder-dockercfg-5d5px-pull". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:23:52 crc kubenswrapper[4875]: I1126 00:23:52.524065 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/36bca648-f3d3-429a-8549-d02890cbf334-builder-dockercfg-5d5px-push" (OuterVolumeSpecName: "builder-dockercfg-5d5px-push") pod "36bca648-f3d3-429a-8549-d02890cbf334" (UID: "36bca648-f3d3-429a-8549-d02890cbf334"). InnerVolumeSpecName "builder-dockercfg-5d5px-push". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:23:52 crc kubenswrapper[4875]: I1126 00:23:52.620250 4875 reconciler_common.go:293] "Volume detached for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/36bca648-f3d3-429a-8549-d02890cbf334-build-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 26 00:23:52 crc kubenswrapper[4875]: I1126 00:23:52.620286 4875 reconciler_common.go:293] "Volume detached for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/36bca648-f3d3-429a-8549-d02890cbf334-buildcachedir\") on node \"crc\" DevicePath \"\"" Nov 26 00:23:52 crc kubenswrapper[4875]: I1126 00:23:52.620303 4875 reconciler_common.go:293] "Volume detached for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/36bca648-f3d3-429a-8549-d02890cbf334-build-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 26 00:23:52 crc kubenswrapper[4875]: I1126 00:23:52.620316 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-mp2f7\" (UniqueName: \"kubernetes.io/projected/36bca648-f3d3-429a-8549-d02890cbf334-kube-api-access-mp2f7\") on node \"crc\" DevicePath \"\"" Nov 26 00:23:52 crc kubenswrapper[4875]: I1126 00:23:52.620327 4875 reconciler_common.go:293] "Volume detached for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/36bca648-f3d3-429a-8549-d02890cbf334-builder-dockercfg-5d5px-push\") on node \"crc\" DevicePath \"\"" Nov 26 00:23:52 crc kubenswrapper[4875]: I1126 00:23:52.620338 4875 reconciler_common.go:293] "Volume detached for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/36bca648-f3d3-429a-8549-d02890cbf334-container-storage-run\") on node \"crc\" DevicePath \"\"" Nov 26 00:23:52 crc kubenswrapper[4875]: I1126 00:23:52.620348 4875 reconciler_common.go:293] "Volume detached for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/36bca648-f3d3-429a-8549-d02890cbf334-buildworkdir\") on node \"crc\" DevicePath \"\"" Nov 26 00:23:52 crc kubenswrapper[4875]: I1126 00:23:52.620359 4875 reconciler_common.go:293] "Volume detached for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/36bca648-f3d3-429a-8549-d02890cbf334-builder-dockercfg-5d5px-pull\") on node \"crc\" DevicePath \"\"" Nov 26 00:23:52 crc kubenswrapper[4875]: I1126 00:23:52.620369 4875 reconciler_common.go:293] "Volume detached for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/36bca648-f3d3-429a-8549-d02890cbf334-build-system-configs\") on node \"crc\" DevicePath \"\"" Nov 26 00:23:52 crc kubenswrapper[4875]: I1126 00:23:52.655634 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/36bca648-f3d3-429a-8549-d02890cbf334-build-blob-cache" (OuterVolumeSpecName: "build-blob-cache") pod "36bca648-f3d3-429a-8549-d02890cbf334" (UID: "36bca648-f3d3-429a-8549-d02890cbf334"). InnerVolumeSpecName "build-blob-cache". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:23:52 crc kubenswrapper[4875]: I1126 00:23:52.721162 4875 reconciler_common.go:293] "Volume detached for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/36bca648-f3d3-429a-8549-d02890cbf334-build-blob-cache\") on node \"crc\" DevicePath \"\"" Nov 26 00:23:52 crc kubenswrapper[4875]: I1126 00:23:52.940318 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/36bca648-f3d3-429a-8549-d02890cbf334-container-storage-root" (OuterVolumeSpecName: "container-storage-root") pod "36bca648-f3d3-429a-8549-d02890cbf334" (UID: "36bca648-f3d3-429a-8549-d02890cbf334"). InnerVolumeSpecName "container-storage-root". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:23:52 crc kubenswrapper[4875]: I1126 00:23:52.990170 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/service-telemetry_smart-gateway-operator-1-build_36bca648-f3d3-429a-8549-d02890cbf334/docker-build/0.log" Nov 26 00:23:52 crc kubenswrapper[4875]: I1126 00:23:52.990550 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="service-telemetry/smart-gateway-operator-1-build" Nov 26 00:23:52 crc kubenswrapper[4875]: I1126 00:23:52.990550 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/smart-gateway-operator-1-build" event={"ID":"36bca648-f3d3-429a-8549-d02890cbf334","Type":"ContainerDied","Data":"e36c33b499e2cd2025008dcae4abe8200a452312988245228a22734571090b3c"} Nov 26 00:23:52 crc kubenswrapper[4875]: I1126 00:23:52.990596 4875 scope.go:117] "RemoveContainer" containerID="51179a8dde76acdf1d4a22a3b47bebcde3afb99146dad3931882ebe63b6bfd9b" Nov 26 00:23:53 crc kubenswrapper[4875]: I1126 00:23:53.029069 4875 reconciler_common.go:293] "Volume detached for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/36bca648-f3d3-429a-8549-d02890cbf334-container-storage-root\") on node \"crc\" DevicePath \"\"" Nov 26 00:23:53 crc kubenswrapper[4875]: I1126 00:23:53.037830 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["service-telemetry/smart-gateway-operator-1-build"] Nov 26 00:23:53 crc kubenswrapper[4875]: I1126 00:23:53.047732 4875 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["service-telemetry/smart-gateway-operator-1-build"] Nov 26 00:23:53 crc kubenswrapper[4875]: I1126 00:23:53.080475 4875 scope.go:117] "RemoveContainer" containerID="f9993430c3684be8b9ffd4e0bd56f59e11f0ea68c9a5fbf75ec00aeb1a6ef7c4" Nov 26 00:23:53 crc kubenswrapper[4875]: I1126 00:23:53.537970 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="36bca648-f3d3-429a-8549-d02890cbf334" path="/var/lib/kubelet/pods/36bca648-f3d3-429a-8549-d02890cbf334/volumes" Nov 26 00:23:53 crc kubenswrapper[4875]: I1126 00:23:53.998394 4875 generic.go:334] "Generic (PLEG): container finished" podID="8490ed0f-10c5-4aa5-9994-34886756816a" containerID="afba9b989ea4f7465afaaf4aea1c792dc625eb4eb64f36f56b0016ecc3fe59fb" exitCode=0 Nov 26 00:23:53 crc kubenswrapper[4875]: I1126 00:23:53.998449 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/smart-gateway-operator-2-build" event={"ID":"8490ed0f-10c5-4aa5-9994-34886756816a","Type":"ContainerDied","Data":"afba9b989ea4f7465afaaf4aea1c792dc625eb4eb64f36f56b0016ecc3fe59fb"} Nov 26 00:23:55 crc kubenswrapper[4875]: I1126 00:23:55.006617 4875 generic.go:334] "Generic (PLEG): container finished" podID="8490ed0f-10c5-4aa5-9994-34886756816a" 
containerID="bc6454d867f29fa1ec35d8e9d086b1818492fd20ce4c03ecb4e9d8b99ef392d7" exitCode=0 Nov 26 00:23:55 crc kubenswrapper[4875]: I1126 00:23:55.006685 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/smart-gateway-operator-2-build" event={"ID":"8490ed0f-10c5-4aa5-9994-34886756816a","Type":"ContainerDied","Data":"bc6454d867f29fa1ec35d8e9d086b1818492fd20ce4c03ecb4e9d8b99ef392d7"} Nov 26 00:23:55 crc kubenswrapper[4875]: I1126 00:23:55.042890 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/service-telemetry_smart-gateway-operator-2-build_8490ed0f-10c5-4aa5-9994-34886756816a/manage-dockerfile/0.log" Nov 26 00:23:56 crc kubenswrapper[4875]: I1126 00:23:56.014513 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/smart-gateway-operator-2-build" event={"ID":"8490ed0f-10c5-4aa5-9994-34886756816a","Type":"ContainerStarted","Data":"3018be65f8805c7215e63627e7e5e0c2cd08a78e0c5bd8b4f3a4a9a5a8c97ea4"} Nov 26 00:23:56 crc kubenswrapper[4875]: I1126 00:23:56.039176 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="service-telemetry/smart-gateway-operator-2-build" podStartSLOduration=7.039158348 podStartE2EDuration="7.039158348s" podCreationTimestamp="2025-11-26 00:23:49 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:23:56.038328696 +0000 UTC m=+1009.228304391" watchObservedRunningTime="2025-11-26 00:23:56.039158348 +0000 UTC m=+1009.229134043" Nov 26 00:23:58 crc kubenswrapper[4875]: E1126 00:23:58.941750 4875 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d4fb5a7_3ade_467a_9640_d744b1ef0c8e.slice/crio-conmon-46c47342b308d949e370e86db45ad1b54d496a9856e0ad49658508d0aa116ac9.scope\": RecentStats: unable to find data in memory cache]" Nov 26 00:25:07 crc kubenswrapper[4875]: I1126 00:25:07.511809 4875 patch_prober.go:28] interesting pod/machine-config-daemon-9mztb container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 00:25:07 crc kubenswrapper[4875]: I1126 00:25:07.512339 4875 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 00:25:15 crc kubenswrapper[4875]: I1126 00:25:15.457452 4875 generic.go:334] "Generic (PLEG): container finished" podID="8490ed0f-10c5-4aa5-9994-34886756816a" containerID="3018be65f8805c7215e63627e7e5e0c2cd08a78e0c5bd8b4f3a4a9a5a8c97ea4" exitCode=0 Nov 26 00:25:15 crc kubenswrapper[4875]: I1126 00:25:15.457528 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/smart-gateway-operator-2-build" event={"ID":"8490ed0f-10c5-4aa5-9994-34886756816a","Type":"ContainerDied","Data":"3018be65f8805c7215e63627e7e5e0c2cd08a78e0c5bd8b4f3a4a9a5a8c97ea4"} Nov 26 00:25:16 crc kubenswrapper[4875]: I1126 00:25:16.706234 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="service-telemetry/smart-gateway-operator-2-build" Nov 26 00:25:16 crc kubenswrapper[4875]: I1126 00:25:16.850202 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/8490ed0f-10c5-4aa5-9994-34886756816a-buildworkdir\") pod \"8490ed0f-10c5-4aa5-9994-34886756816a\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " Nov 26 00:25:16 crc kubenswrapper[4875]: I1126 00:25:16.850265 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/8490ed0f-10c5-4aa5-9994-34886756816a-builder-dockercfg-5d5px-pull\") pod \"8490ed0f-10c5-4aa5-9994-34886756816a\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " Nov 26 00:25:16 crc kubenswrapper[4875]: I1126 00:25:16.850328 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/8490ed0f-10c5-4aa5-9994-34886756816a-build-system-configs\") pod \"8490ed0f-10c5-4aa5-9994-34886756816a\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " Nov 26 00:25:16 crc kubenswrapper[4875]: I1126 00:25:16.850347 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/8490ed0f-10c5-4aa5-9994-34886756816a-builder-dockercfg-5d5px-push\") pod \"8490ed0f-10c5-4aa5-9994-34886756816a\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " Nov 26 00:25:16 crc kubenswrapper[4875]: I1126 00:25:16.850375 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-gnfz4\" (UniqueName: \"kubernetes.io/projected/8490ed0f-10c5-4aa5-9994-34886756816a-kube-api-access-gnfz4\") pod \"8490ed0f-10c5-4aa5-9994-34886756816a\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " Nov 26 00:25:16 crc kubenswrapper[4875]: I1126 00:25:16.850442 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/8490ed0f-10c5-4aa5-9994-34886756816a-container-storage-run\") pod \"8490ed0f-10c5-4aa5-9994-34886756816a\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " Nov 26 00:25:16 crc kubenswrapper[4875]: I1126 00:25:16.850683 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8490ed0f-10c5-4aa5-9994-34886756816a-buildcachedir" (OuterVolumeSpecName: "buildcachedir") pod "8490ed0f-10c5-4aa5-9994-34886756816a" (UID: "8490ed0f-10c5-4aa5-9994-34886756816a"). InnerVolumeSpecName "buildcachedir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:25:16 crc kubenswrapper[4875]: I1126 00:25:16.851176 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8490ed0f-10c5-4aa5-9994-34886756816a-build-system-configs" (OuterVolumeSpecName: "build-system-configs") pod "8490ed0f-10c5-4aa5-9994-34886756816a" (UID: "8490ed0f-10c5-4aa5-9994-34886756816a"). InnerVolumeSpecName "build-system-configs". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:25:16 crc kubenswrapper[4875]: I1126 00:25:16.851390 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8490ed0f-10c5-4aa5-9994-34886756816a-container-storage-run" (OuterVolumeSpecName: "container-storage-run") pod "8490ed0f-10c5-4aa5-9994-34886756816a" (UID: "8490ed0f-10c5-4aa5-9994-34886756816a"). 
InnerVolumeSpecName "container-storage-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:25:16 crc kubenswrapper[4875]: I1126 00:25:16.850502 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/8490ed0f-10c5-4aa5-9994-34886756816a-buildcachedir\") pod \"8490ed0f-10c5-4aa5-9994-34886756816a\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " Nov 26 00:25:16 crc kubenswrapper[4875]: I1126 00:25:16.851584 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/8490ed0f-10c5-4aa5-9994-34886756816a-build-blob-cache\") pod \"8490ed0f-10c5-4aa5-9994-34886756816a\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " Nov 26 00:25:16 crc kubenswrapper[4875]: I1126 00:25:16.851608 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/8490ed0f-10c5-4aa5-9994-34886756816a-node-pullsecrets\") pod \"8490ed0f-10c5-4aa5-9994-34886756816a\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " Nov 26 00:25:16 crc kubenswrapper[4875]: I1126 00:25:16.851749 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/8490ed0f-10c5-4aa5-9994-34886756816a-node-pullsecrets" (OuterVolumeSpecName: "node-pullsecrets") pod "8490ed0f-10c5-4aa5-9994-34886756816a" (UID: "8490ed0f-10c5-4aa5-9994-34886756816a"). InnerVolumeSpecName "node-pullsecrets". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:25:16 crc kubenswrapper[4875]: I1126 00:25:16.854197 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8490ed0f-10c5-4aa5-9994-34886756816a-buildworkdir" (OuterVolumeSpecName: "buildworkdir") pod "8490ed0f-10c5-4aa5-9994-34886756816a" (UID: "8490ed0f-10c5-4aa5-9994-34886756816a"). InnerVolumeSpecName "buildworkdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:25:16 crc kubenswrapper[4875]: I1126 00:25:16.855400 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8490ed0f-10c5-4aa5-9994-34886756816a-builder-dockercfg-5d5px-pull" (OuterVolumeSpecName: "builder-dockercfg-5d5px-pull") pod "8490ed0f-10c5-4aa5-9994-34886756816a" (UID: "8490ed0f-10c5-4aa5-9994-34886756816a"). InnerVolumeSpecName "builder-dockercfg-5d5px-pull". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:25:16 crc kubenswrapper[4875]: I1126 00:25:16.855722 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8490ed0f-10c5-4aa5-9994-34886756816a-builder-dockercfg-5d5px-push" (OuterVolumeSpecName: "builder-dockercfg-5d5px-push") pod "8490ed0f-10c5-4aa5-9994-34886756816a" (UID: "8490ed0f-10c5-4aa5-9994-34886756816a"). InnerVolumeSpecName "builder-dockercfg-5d5px-push". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:25:16 crc kubenswrapper[4875]: I1126 00:25:16.857349 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8490ed0f-10c5-4aa5-9994-34886756816a-kube-api-access-gnfz4" (OuterVolumeSpecName: "kube-api-access-gnfz4") pod "8490ed0f-10c5-4aa5-9994-34886756816a" (UID: "8490ed0f-10c5-4aa5-9994-34886756816a"). InnerVolumeSpecName "kube-api-access-gnfz4". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:25:16 crc kubenswrapper[4875]: I1126 00:25:16.861586 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/8490ed0f-10c5-4aa5-9994-34886756816a-container-storage-root\") pod \"8490ed0f-10c5-4aa5-9994-34886756816a\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " Nov 26 00:25:16 crc kubenswrapper[4875]: I1126 00:25:16.861662 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8490ed0f-10c5-4aa5-9994-34886756816a-build-proxy-ca-bundles\") pod \"8490ed0f-10c5-4aa5-9994-34886756816a\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " Nov 26 00:25:16 crc kubenswrapper[4875]: I1126 00:25:16.861724 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8490ed0f-10c5-4aa5-9994-34886756816a-build-ca-bundles\") pod \"8490ed0f-10c5-4aa5-9994-34886756816a\" (UID: \"8490ed0f-10c5-4aa5-9994-34886756816a\") " Nov 26 00:25:16 crc kubenswrapper[4875]: I1126 00:25:16.862028 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8490ed0f-10c5-4aa5-9994-34886756816a-build-proxy-ca-bundles" (OuterVolumeSpecName: "build-proxy-ca-bundles") pod "8490ed0f-10c5-4aa5-9994-34886756816a" (UID: "8490ed0f-10c5-4aa5-9994-34886756816a"). InnerVolumeSpecName "build-proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:25:16 crc kubenswrapper[4875]: I1126 00:25:16.862296 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8490ed0f-10c5-4aa5-9994-34886756816a-build-ca-bundles" (OuterVolumeSpecName: "build-ca-bundles") pod "8490ed0f-10c5-4aa5-9994-34886756816a" (UID: "8490ed0f-10c5-4aa5-9994-34886756816a"). InnerVolumeSpecName "build-ca-bundles". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:25:16 crc kubenswrapper[4875]: I1126 00:25:16.862762 4875 reconciler_common.go:293] "Volume detached for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/8490ed0f-10c5-4aa5-9994-34886756816a-build-system-configs\") on node \"crc\" DevicePath \"\"" Nov 26 00:25:16 crc kubenswrapper[4875]: I1126 00:25:16.862788 4875 reconciler_common.go:293] "Volume detached for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/8490ed0f-10c5-4aa5-9994-34886756816a-builder-dockercfg-5d5px-push\") on node \"crc\" DevicePath \"\"" Nov 26 00:25:16 crc kubenswrapper[4875]: I1126 00:25:16.862801 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-gnfz4\" (UniqueName: \"kubernetes.io/projected/8490ed0f-10c5-4aa5-9994-34886756816a-kube-api-access-gnfz4\") on node \"crc\" DevicePath \"\"" Nov 26 00:25:16 crc kubenswrapper[4875]: I1126 00:25:16.862813 4875 reconciler_common.go:293] "Volume detached for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/8490ed0f-10c5-4aa5-9994-34886756816a-container-storage-run\") on node \"crc\" DevicePath \"\"" Nov 26 00:25:16 crc kubenswrapper[4875]: I1126 00:25:16.862825 4875 reconciler_common.go:293] "Volume detached for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/8490ed0f-10c5-4aa5-9994-34886756816a-buildcachedir\") on node \"crc\" DevicePath \"\"" Nov 26 00:25:16 crc kubenswrapper[4875]: I1126 00:25:16.862836 4875 reconciler_common.go:293] "Volume detached for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/8490ed0f-10c5-4aa5-9994-34886756816a-node-pullsecrets\") on node \"crc\" DevicePath \"\"" Nov 26 00:25:16 crc kubenswrapper[4875]: I1126 00:25:16.862849 4875 reconciler_common.go:293] "Volume detached for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8490ed0f-10c5-4aa5-9994-34886756816a-build-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 26 00:25:16 crc kubenswrapper[4875]: I1126 00:25:16.862862 4875 reconciler_common.go:293] "Volume detached for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/8490ed0f-10c5-4aa5-9994-34886756816a-build-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 26 00:25:16 crc kubenswrapper[4875]: I1126 00:25:16.862874 4875 reconciler_common.go:293] "Volume detached for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/8490ed0f-10c5-4aa5-9994-34886756816a-buildworkdir\") on node \"crc\" DevicePath \"\"" Nov 26 00:25:16 crc kubenswrapper[4875]: I1126 00:25:16.862885 4875 reconciler_common.go:293] "Volume detached for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/8490ed0f-10c5-4aa5-9994-34886756816a-builder-dockercfg-5d5px-pull\") on node \"crc\" DevicePath \"\"" Nov 26 00:25:17 crc kubenswrapper[4875]: I1126 00:25:17.470745 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/smart-gateway-operator-2-build" event={"ID":"8490ed0f-10c5-4aa5-9994-34886756816a","Type":"ContainerDied","Data":"51e7a762ae4764299df07946c4f463b16c96c25438e8e366695ffa26df931ba7"} Nov 26 00:25:17 crc kubenswrapper[4875]: I1126 00:25:17.470782 4875 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="51e7a762ae4764299df07946c4f463b16c96c25438e8e366695ffa26df931ba7" Nov 26 00:25:17 crc kubenswrapper[4875]: I1126 00:25:17.470821 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="service-telemetry/smart-gateway-operator-2-build" Nov 26 00:25:18 crc kubenswrapper[4875]: I1126 00:25:18.715892 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8490ed0f-10c5-4aa5-9994-34886756816a-build-blob-cache" (OuterVolumeSpecName: "build-blob-cache") pod "8490ed0f-10c5-4aa5-9994-34886756816a" (UID: "8490ed0f-10c5-4aa5-9994-34886756816a"). InnerVolumeSpecName "build-blob-cache". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:25:18 crc kubenswrapper[4875]: I1126 00:25:18.785989 4875 reconciler_common.go:293] "Volume detached for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/8490ed0f-10c5-4aa5-9994-34886756816a-build-blob-cache\") on node \"crc\" DevicePath \"\"" Nov 26 00:25:18 crc kubenswrapper[4875]: I1126 00:25:18.829286 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8490ed0f-10c5-4aa5-9994-34886756816a-container-storage-root" (OuterVolumeSpecName: "container-storage-root") pod "8490ed0f-10c5-4aa5-9994-34886756816a" (UID: "8490ed0f-10c5-4aa5-9994-34886756816a"). InnerVolumeSpecName "container-storage-root". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:25:18 crc kubenswrapper[4875]: I1126 00:25:18.887765 4875 reconciler_common.go:293] "Volume detached for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/8490ed0f-10c5-4aa5-9994-34886756816a-container-storage-root\") on node \"crc\" DevicePath \"\"" Nov 26 00:25:21 crc kubenswrapper[4875]: I1126 00:25:21.843346 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["service-telemetry/sg-core-1-build"] Nov 26 00:25:21 crc kubenswrapper[4875]: E1126 00:25:21.843875 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36bca648-f3d3-429a-8549-d02890cbf334" containerName="docker-build" Nov 26 00:25:21 crc kubenswrapper[4875]: I1126 00:25:21.843887 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="36bca648-f3d3-429a-8549-d02890cbf334" containerName="docker-build" Nov 26 00:25:21 crc kubenswrapper[4875]: E1126 00:25:21.843898 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8490ed0f-10c5-4aa5-9994-34886756816a" containerName="git-clone" Nov 26 00:25:21 crc kubenswrapper[4875]: I1126 00:25:21.843904 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="8490ed0f-10c5-4aa5-9994-34886756816a" containerName="git-clone" Nov 26 00:25:21 crc kubenswrapper[4875]: E1126 00:25:21.843916 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8490ed0f-10c5-4aa5-9994-34886756816a" containerName="manage-dockerfile" Nov 26 00:25:21 crc kubenswrapper[4875]: I1126 00:25:21.843924 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="8490ed0f-10c5-4aa5-9994-34886756816a" containerName="manage-dockerfile" Nov 26 00:25:21 crc kubenswrapper[4875]: E1126 00:25:21.843934 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36bca648-f3d3-429a-8549-d02890cbf334" containerName="manage-dockerfile" Nov 26 00:25:21 crc kubenswrapper[4875]: I1126 00:25:21.843940 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="36bca648-f3d3-429a-8549-d02890cbf334" containerName="manage-dockerfile" Nov 26 00:25:21 crc kubenswrapper[4875]: E1126 00:25:21.843948 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8490ed0f-10c5-4aa5-9994-34886756816a" containerName="docker-build" Nov 26 00:25:21 crc kubenswrapper[4875]: I1126 00:25:21.843953 4875 
state_mem.go:107] "Deleted CPUSet assignment" podUID="8490ed0f-10c5-4aa5-9994-34886756816a" containerName="docker-build" Nov 26 00:25:21 crc kubenswrapper[4875]: I1126 00:25:21.844042 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="36bca648-f3d3-429a-8549-d02890cbf334" containerName="docker-build" Nov 26 00:25:21 crc kubenswrapper[4875]: I1126 00:25:21.844060 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="8490ed0f-10c5-4aa5-9994-34886756816a" containerName="docker-build" Nov 26 00:25:21 crc kubenswrapper[4875]: I1126 00:25:21.844620 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="service-telemetry/sg-core-1-build" Nov 26 00:25:21 crc kubenswrapper[4875]: I1126 00:25:21.847326 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"service-telemetry"/"sg-core-1-ca" Nov 26 00:25:21 crc kubenswrapper[4875]: I1126 00:25:21.847341 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"service-telemetry"/"sg-core-1-sys-config" Nov 26 00:25:21 crc kubenswrapper[4875]: I1126 00:25:21.847732 4875 reflector.go:368] Caches populated for *v1.Secret from object-"service-telemetry"/"builder-dockercfg-5d5px" Nov 26 00:25:21 crc kubenswrapper[4875]: I1126 00:25:21.847883 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"service-telemetry"/"sg-core-1-global-ca" Nov 26 00:25:21 crc kubenswrapper[4875]: I1126 00:25:21.857028 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["service-telemetry/sg-core-1-build"] Nov 26 00:25:21 crc kubenswrapper[4875]: I1126 00:25:21.923359 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/5c6db9b3-ee71-4956-9091-5b8ff94acefc-buildworkdir\") pod \"sg-core-1-build\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " pod="service-telemetry/sg-core-1-build" Nov 26 00:25:21 crc kubenswrapper[4875]: I1126 00:25:21.923465 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/5c6db9b3-ee71-4956-9091-5b8ff94acefc-builder-dockercfg-5d5px-push\") pod \"sg-core-1-build\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " pod="service-telemetry/sg-core-1-build" Nov 26 00:25:21 crc kubenswrapper[4875]: I1126 00:25:21.923490 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/5c6db9b3-ee71-4956-9091-5b8ff94acefc-builder-dockercfg-5d5px-pull\") pod \"sg-core-1-build\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " pod="service-telemetry/sg-core-1-build" Nov 26 00:25:21 crc kubenswrapper[4875]: I1126 00:25:21.923518 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/5c6db9b3-ee71-4956-9091-5b8ff94acefc-build-system-configs\") pod \"sg-core-1-build\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " pod="service-telemetry/sg-core-1-build" Nov 26 00:25:21 crc kubenswrapper[4875]: I1126 00:25:21.923566 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/5c6db9b3-ee71-4956-9091-5b8ff94acefc-buildcachedir\") pod \"sg-core-1-build\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " 
pod="service-telemetry/sg-core-1-build" Nov 26 00:25:21 crc kubenswrapper[4875]: I1126 00:25:21.923713 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5c6db9b3-ee71-4956-9091-5b8ff94acefc-build-proxy-ca-bundles\") pod \"sg-core-1-build\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " pod="service-telemetry/sg-core-1-build" Nov 26 00:25:21 crc kubenswrapper[4875]: I1126 00:25:21.923837 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/5c6db9b3-ee71-4956-9091-5b8ff94acefc-container-storage-run\") pod \"sg-core-1-build\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " pod="service-telemetry/sg-core-1-build" Nov 26 00:25:21 crc kubenswrapper[4875]: I1126 00:25:21.923880 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-s6dp9\" (UniqueName: \"kubernetes.io/projected/5c6db9b3-ee71-4956-9091-5b8ff94acefc-kube-api-access-s6dp9\") pod \"sg-core-1-build\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " pod="service-telemetry/sg-core-1-build" Nov 26 00:25:21 crc kubenswrapper[4875]: I1126 00:25:21.923908 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/5c6db9b3-ee71-4956-9091-5b8ff94acefc-node-pullsecrets\") pod \"sg-core-1-build\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " pod="service-telemetry/sg-core-1-build" Nov 26 00:25:21 crc kubenswrapper[4875]: I1126 00:25:21.923935 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5c6db9b3-ee71-4956-9091-5b8ff94acefc-build-ca-bundles\") pod \"sg-core-1-build\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " pod="service-telemetry/sg-core-1-build" Nov 26 00:25:21 crc kubenswrapper[4875]: I1126 00:25:21.923993 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/5c6db9b3-ee71-4956-9091-5b8ff94acefc-container-storage-root\") pod \"sg-core-1-build\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " pod="service-telemetry/sg-core-1-build" Nov 26 00:25:21 crc kubenswrapper[4875]: I1126 00:25:21.924089 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/5c6db9b3-ee71-4956-9091-5b8ff94acefc-build-blob-cache\") pod \"sg-core-1-build\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " pod="service-telemetry/sg-core-1-build" Nov 26 00:25:22 crc kubenswrapper[4875]: I1126 00:25:22.025018 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/5c6db9b3-ee71-4956-9091-5b8ff94acefc-buildworkdir\") pod \"sg-core-1-build\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " pod="service-telemetry/sg-core-1-build" Nov 26 00:25:22 crc kubenswrapper[4875]: I1126 00:25:22.025452 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/5c6db9b3-ee71-4956-9091-5b8ff94acefc-buildworkdir\") pod \"sg-core-1-build\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " 
pod="service-telemetry/sg-core-1-build" Nov 26 00:25:22 crc kubenswrapper[4875]: I1126 00:25:22.025880 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/5c6db9b3-ee71-4956-9091-5b8ff94acefc-builder-dockercfg-5d5px-pull\") pod \"sg-core-1-build\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " pod="service-telemetry/sg-core-1-build" Nov 26 00:25:22 crc kubenswrapper[4875]: I1126 00:25:22.025950 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/5c6db9b3-ee71-4956-9091-5b8ff94acefc-builder-dockercfg-5d5px-push\") pod \"sg-core-1-build\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " pod="service-telemetry/sg-core-1-build" Nov 26 00:25:22 crc kubenswrapper[4875]: I1126 00:25:22.026119 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/5c6db9b3-ee71-4956-9091-5b8ff94acefc-build-system-configs\") pod \"sg-core-1-build\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " pod="service-telemetry/sg-core-1-build" Nov 26 00:25:22 crc kubenswrapper[4875]: I1126 00:25:22.026190 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/5c6db9b3-ee71-4956-9091-5b8ff94acefc-buildcachedir\") pod \"sg-core-1-build\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " pod="service-telemetry/sg-core-1-build" Nov 26 00:25:22 crc kubenswrapper[4875]: I1126 00:25:22.026212 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5c6db9b3-ee71-4956-9091-5b8ff94acefc-build-proxy-ca-bundles\") pod \"sg-core-1-build\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " pod="service-telemetry/sg-core-1-build" Nov 26 00:25:22 crc kubenswrapper[4875]: I1126 00:25:22.026235 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/5c6db9b3-ee71-4956-9091-5b8ff94acefc-container-storage-run\") pod \"sg-core-1-build\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " pod="service-telemetry/sg-core-1-build" Nov 26 00:25:22 crc kubenswrapper[4875]: I1126 00:25:22.026256 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-s6dp9\" (UniqueName: \"kubernetes.io/projected/5c6db9b3-ee71-4956-9091-5b8ff94acefc-kube-api-access-s6dp9\") pod \"sg-core-1-build\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " pod="service-telemetry/sg-core-1-build" Nov 26 00:25:22 crc kubenswrapper[4875]: I1126 00:25:22.026275 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/5c6db9b3-ee71-4956-9091-5b8ff94acefc-node-pullsecrets\") pod \"sg-core-1-build\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " pod="service-telemetry/sg-core-1-build" Nov 26 00:25:22 crc kubenswrapper[4875]: I1126 00:25:22.026290 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5c6db9b3-ee71-4956-9091-5b8ff94acefc-build-ca-bundles\") pod \"sg-core-1-build\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " pod="service-telemetry/sg-core-1-build" Nov 26 00:25:22 crc kubenswrapper[4875]: I1126 00:25:22.026309 
4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/5c6db9b3-ee71-4956-9091-5b8ff94acefc-container-storage-root\") pod \"sg-core-1-build\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " pod="service-telemetry/sg-core-1-build" Nov 26 00:25:22 crc kubenswrapper[4875]: I1126 00:25:22.026339 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/5c6db9b3-ee71-4956-9091-5b8ff94acefc-build-blob-cache\") pod \"sg-core-1-build\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " pod="service-telemetry/sg-core-1-build" Nov 26 00:25:22 crc kubenswrapper[4875]: I1126 00:25:22.026342 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/5c6db9b3-ee71-4956-9091-5b8ff94acefc-buildcachedir\") pod \"sg-core-1-build\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " pod="service-telemetry/sg-core-1-build" Nov 26 00:25:22 crc kubenswrapper[4875]: I1126 00:25:22.026614 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/5c6db9b3-ee71-4956-9091-5b8ff94acefc-build-blob-cache\") pod \"sg-core-1-build\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " pod="service-telemetry/sg-core-1-build" Nov 26 00:25:22 crc kubenswrapper[4875]: I1126 00:25:22.026657 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/5c6db9b3-ee71-4956-9091-5b8ff94acefc-container-storage-run\") pod \"sg-core-1-build\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " pod="service-telemetry/sg-core-1-build" Nov 26 00:25:22 crc kubenswrapper[4875]: I1126 00:25:22.027004 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/5c6db9b3-ee71-4956-9091-5b8ff94acefc-container-storage-root\") pod \"sg-core-1-build\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " pod="service-telemetry/sg-core-1-build" Nov 26 00:25:22 crc kubenswrapper[4875]: I1126 00:25:22.027095 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/5c6db9b3-ee71-4956-9091-5b8ff94acefc-node-pullsecrets\") pod \"sg-core-1-build\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " pod="service-telemetry/sg-core-1-build" Nov 26 00:25:22 crc kubenswrapper[4875]: I1126 00:25:22.027226 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/5c6db9b3-ee71-4956-9091-5b8ff94acefc-build-system-configs\") pod \"sg-core-1-build\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " pod="service-telemetry/sg-core-1-build" Nov 26 00:25:22 crc kubenswrapper[4875]: I1126 00:25:22.027370 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5c6db9b3-ee71-4956-9091-5b8ff94acefc-build-proxy-ca-bundles\") pod \"sg-core-1-build\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " pod="service-telemetry/sg-core-1-build" Nov 26 00:25:22 crc kubenswrapper[4875]: I1126 00:25:22.027511 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"build-ca-bundles\" (UniqueName: 
\"kubernetes.io/configmap/5c6db9b3-ee71-4956-9091-5b8ff94acefc-build-ca-bundles\") pod \"sg-core-1-build\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " pod="service-telemetry/sg-core-1-build" Nov 26 00:25:22 crc kubenswrapper[4875]: I1126 00:25:22.032757 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/5c6db9b3-ee71-4956-9091-5b8ff94acefc-builder-dockercfg-5d5px-push\") pod \"sg-core-1-build\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " pod="service-telemetry/sg-core-1-build" Nov 26 00:25:22 crc kubenswrapper[4875]: I1126 00:25:22.034679 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/5c6db9b3-ee71-4956-9091-5b8ff94acefc-builder-dockercfg-5d5px-pull\") pod \"sg-core-1-build\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " pod="service-telemetry/sg-core-1-build" Nov 26 00:25:22 crc kubenswrapper[4875]: I1126 00:25:22.051514 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-s6dp9\" (UniqueName: \"kubernetes.io/projected/5c6db9b3-ee71-4956-9091-5b8ff94acefc-kube-api-access-s6dp9\") pod \"sg-core-1-build\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " pod="service-telemetry/sg-core-1-build" Nov 26 00:25:22 crc kubenswrapper[4875]: I1126 00:25:22.161882 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="service-telemetry/sg-core-1-build" Nov 26 00:25:22 crc kubenswrapper[4875]: I1126 00:25:22.357872 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["service-telemetry/sg-core-1-build"] Nov 26 00:25:22 crc kubenswrapper[4875]: I1126 00:25:22.497245 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/sg-core-1-build" event={"ID":"5c6db9b3-ee71-4956-9091-5b8ff94acefc","Type":"ContainerStarted","Data":"0aff73695f5119c974662c297d32ac1ba3a115e269915f508b906c4c604e2ea0"} Nov 26 00:25:23 crc kubenswrapper[4875]: I1126 00:25:23.505849 4875 generic.go:334] "Generic (PLEG): container finished" podID="5c6db9b3-ee71-4956-9091-5b8ff94acefc" containerID="1f60f9e58c8dee3fe885920a4636678e2b5073eabf43fab7d8a99ff9a61632b5" exitCode=0 Nov 26 00:25:23 crc kubenswrapper[4875]: I1126 00:25:23.505973 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/sg-core-1-build" event={"ID":"5c6db9b3-ee71-4956-9091-5b8ff94acefc","Type":"ContainerDied","Data":"1f60f9e58c8dee3fe885920a4636678e2b5073eabf43fab7d8a99ff9a61632b5"} Nov 26 00:25:24 crc kubenswrapper[4875]: I1126 00:25:24.516020 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/sg-core-1-build" event={"ID":"5c6db9b3-ee71-4956-9091-5b8ff94acefc","Type":"ContainerStarted","Data":"afc46da5adb9627e4077342a92c2746c7aede6cb31eb84184a077946621a8313"} Nov 26 00:25:24 crc kubenswrapper[4875]: I1126 00:25:24.537716 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="service-telemetry/sg-core-1-build" podStartSLOduration=3.537696268 podStartE2EDuration="3.537696268s" podCreationTimestamp="2025-11-26 00:25:21 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:25:24.535575189 +0000 UTC m=+1097.725550894" watchObservedRunningTime="2025-11-26 00:25:24.537696268 +0000 UTC m=+1097.727671973" Nov 26 00:25:32 crc kubenswrapper[4875]: I1126 00:25:32.654351 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" 
pods=["service-telemetry/sg-core-1-build"] Nov 26 00:25:32 crc kubenswrapper[4875]: I1126 00:25:32.655203 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="service-telemetry/sg-core-1-build" podUID="5c6db9b3-ee71-4956-9091-5b8ff94acefc" containerName="docker-build" containerID="cri-o://afc46da5adb9627e4077342a92c2746c7aede6cb31eb84184a077946621a8313" gracePeriod=30 Nov 26 00:25:32 crc kubenswrapper[4875]: I1126 00:25:32.999185 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/service-telemetry_sg-core-1-build_5c6db9b3-ee71-4956-9091-5b8ff94acefc/docker-build/0.log" Nov 26 00:25:32 crc kubenswrapper[4875]: I1126 00:25:32.999891 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="service-telemetry/sg-core-1-build" Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.170595 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/5c6db9b3-ee71-4956-9091-5b8ff94acefc-container-storage-root\") pod \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.170674 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/5c6db9b3-ee71-4956-9091-5b8ff94acefc-build-blob-cache\") pod \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.170705 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5c6db9b3-ee71-4956-9091-5b8ff94acefc-build-proxy-ca-bundles\") pod \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.170725 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/5c6db9b3-ee71-4956-9091-5b8ff94acefc-container-storage-run\") pod \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.170750 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/5c6db9b3-ee71-4956-9091-5b8ff94acefc-builder-dockercfg-5d5px-pull\") pod \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.170770 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/5c6db9b3-ee71-4956-9091-5b8ff94acefc-node-pullsecrets\") pod \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.170814 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/5c6db9b3-ee71-4956-9091-5b8ff94acefc-buildworkdir\") pod \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.170831 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"buildcachedir\" (UniqueName: 
\"kubernetes.io/host-path/5c6db9b3-ee71-4956-9091-5b8ff94acefc-buildcachedir\") pod \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.170853 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-s6dp9\" (UniqueName: \"kubernetes.io/projected/5c6db9b3-ee71-4956-9091-5b8ff94acefc-kube-api-access-s6dp9\") pod \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.170878 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/5c6db9b3-ee71-4956-9091-5b8ff94acefc-builder-dockercfg-5d5px-push\") pod \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.170899 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5c6db9b3-ee71-4956-9091-5b8ff94acefc-build-ca-bundles\") pod \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.170926 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/5c6db9b3-ee71-4956-9091-5b8ff94acefc-build-system-configs\") pod \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\" (UID: \"5c6db9b3-ee71-4956-9091-5b8ff94acefc\") " Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.171292 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5c6db9b3-ee71-4956-9091-5b8ff94acefc-buildcachedir" (OuterVolumeSpecName: "buildcachedir") pod "5c6db9b3-ee71-4956-9091-5b8ff94acefc" (UID: "5c6db9b3-ee71-4956-9091-5b8ff94acefc"). InnerVolumeSpecName "buildcachedir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.171350 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/5c6db9b3-ee71-4956-9091-5b8ff94acefc-node-pullsecrets" (OuterVolumeSpecName: "node-pullsecrets") pod "5c6db9b3-ee71-4956-9091-5b8ff94acefc" (UID: "5c6db9b3-ee71-4956-9091-5b8ff94acefc"). InnerVolumeSpecName "node-pullsecrets". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.171747 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5c6db9b3-ee71-4956-9091-5b8ff94acefc-buildworkdir" (OuterVolumeSpecName: "buildworkdir") pod "5c6db9b3-ee71-4956-9091-5b8ff94acefc" (UID: "5c6db9b3-ee71-4956-9091-5b8ff94acefc"). InnerVolumeSpecName "buildworkdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.172069 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5c6db9b3-ee71-4956-9091-5b8ff94acefc-build-proxy-ca-bundles" (OuterVolumeSpecName: "build-proxy-ca-bundles") pod "5c6db9b3-ee71-4956-9091-5b8ff94acefc" (UID: "5c6db9b3-ee71-4956-9091-5b8ff94acefc"). InnerVolumeSpecName "build-proxy-ca-bundles". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.172085 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5c6db9b3-ee71-4956-9091-5b8ff94acefc-build-ca-bundles" (OuterVolumeSpecName: "build-ca-bundles") pod "5c6db9b3-ee71-4956-9091-5b8ff94acefc" (UID: "5c6db9b3-ee71-4956-9091-5b8ff94acefc"). InnerVolumeSpecName "build-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.172255 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/5c6db9b3-ee71-4956-9091-5b8ff94acefc-build-system-configs" (OuterVolumeSpecName: "build-system-configs") pod "5c6db9b3-ee71-4956-9091-5b8ff94acefc" (UID: "5c6db9b3-ee71-4956-9091-5b8ff94acefc"). InnerVolumeSpecName "build-system-configs". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.172872 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5c6db9b3-ee71-4956-9091-5b8ff94acefc-container-storage-run" (OuterVolumeSpecName: "container-storage-run") pod "5c6db9b3-ee71-4956-9091-5b8ff94acefc" (UID: "5c6db9b3-ee71-4956-9091-5b8ff94acefc"). InnerVolumeSpecName "container-storage-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.177376 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c6db9b3-ee71-4956-9091-5b8ff94acefc-builder-dockercfg-5d5px-push" (OuterVolumeSpecName: "builder-dockercfg-5d5px-push") pod "5c6db9b3-ee71-4956-9091-5b8ff94acefc" (UID: "5c6db9b3-ee71-4956-9091-5b8ff94acefc"). InnerVolumeSpecName "builder-dockercfg-5d5px-push". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.178142 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/5c6db9b3-ee71-4956-9091-5b8ff94acefc-builder-dockercfg-5d5px-pull" (OuterVolumeSpecName: "builder-dockercfg-5d5px-pull") pod "5c6db9b3-ee71-4956-9091-5b8ff94acefc" (UID: "5c6db9b3-ee71-4956-9091-5b8ff94acefc"). InnerVolumeSpecName "builder-dockercfg-5d5px-pull". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.184944 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/5c6db9b3-ee71-4956-9091-5b8ff94acefc-kube-api-access-s6dp9" (OuterVolumeSpecName: "kube-api-access-s6dp9") pod "5c6db9b3-ee71-4956-9091-5b8ff94acefc" (UID: "5c6db9b3-ee71-4956-9091-5b8ff94acefc"). InnerVolumeSpecName "kube-api-access-s6dp9". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.270520 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5c6db9b3-ee71-4956-9091-5b8ff94acefc-build-blob-cache" (OuterVolumeSpecName: "build-blob-cache") pod "5c6db9b3-ee71-4956-9091-5b8ff94acefc" (UID: "5c6db9b3-ee71-4956-9091-5b8ff94acefc"). InnerVolumeSpecName "build-blob-cache". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.271974 4875 reconciler_common.go:293] "Volume detached for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5c6db9b3-ee71-4956-9091-5b8ff94acefc-build-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.272042 4875 reconciler_common.go:293] "Volume detached for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/5c6db9b3-ee71-4956-9091-5b8ff94acefc-container-storage-run\") on node \"crc\" DevicePath \"\"" Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.272056 4875 reconciler_common.go:293] "Volume detached for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/5c6db9b3-ee71-4956-9091-5b8ff94acefc-builder-dockercfg-5d5px-pull\") on node \"crc\" DevicePath \"\"" Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.272068 4875 reconciler_common.go:293] "Volume detached for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/5c6db9b3-ee71-4956-9091-5b8ff94acefc-node-pullsecrets\") on node \"crc\" DevicePath \"\"" Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.272081 4875 reconciler_common.go:293] "Volume detached for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/5c6db9b3-ee71-4956-9091-5b8ff94acefc-buildworkdir\") on node \"crc\" DevicePath \"\"" Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.272091 4875 reconciler_common.go:293] "Volume detached for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/5c6db9b3-ee71-4956-9091-5b8ff94acefc-buildcachedir\") on node \"crc\" DevicePath \"\"" Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.272100 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-s6dp9\" (UniqueName: \"kubernetes.io/projected/5c6db9b3-ee71-4956-9091-5b8ff94acefc-kube-api-access-s6dp9\") on node \"crc\" DevicePath \"\"" Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.272111 4875 reconciler_common.go:293] "Volume detached for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/5c6db9b3-ee71-4956-9091-5b8ff94acefc-builder-dockercfg-5d5px-push\") on node \"crc\" DevicePath \"\"" Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.272121 4875 reconciler_common.go:293] "Volume detached for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/5c6db9b3-ee71-4956-9091-5b8ff94acefc-build-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.272132 4875 reconciler_common.go:293] "Volume detached for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/5c6db9b3-ee71-4956-9091-5b8ff94acefc-build-system-configs\") on node \"crc\" DevicePath \"\"" Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.373115 4875 reconciler_common.go:293] "Volume detached for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/5c6db9b3-ee71-4956-9091-5b8ff94acefc-build-blob-cache\") on node \"crc\" DevicePath \"\"" Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.575122 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/service-telemetry_sg-core-1-build_5c6db9b3-ee71-4956-9091-5b8ff94acefc/docker-build/0.log" Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.576015 4875 generic.go:334] "Generic (PLEG): container finished" podID="5c6db9b3-ee71-4956-9091-5b8ff94acefc" 
containerID="afc46da5adb9627e4077342a92c2746c7aede6cb31eb84184a077946621a8313" exitCode=1 Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.576059 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/sg-core-1-build" event={"ID":"5c6db9b3-ee71-4956-9091-5b8ff94acefc","Type":"ContainerDied","Data":"afc46da5adb9627e4077342a92c2746c7aede6cb31eb84184a077946621a8313"} Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.576089 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/sg-core-1-build" event={"ID":"5c6db9b3-ee71-4956-9091-5b8ff94acefc","Type":"ContainerDied","Data":"0aff73695f5119c974662c297d32ac1ba3a115e269915f508b906c4c604e2ea0"} Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.576108 4875 scope.go:117] "RemoveContainer" containerID="afc46da5adb9627e4077342a92c2746c7aede6cb31eb84184a077946621a8313" Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.576101 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="service-telemetry/sg-core-1-build" Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.592799 4875 scope.go:117] "RemoveContainer" containerID="1f60f9e58c8dee3fe885920a4636678e2b5073eabf43fab7d8a99ff9a61632b5" Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.611795 4875 scope.go:117] "RemoveContainer" containerID="afc46da5adb9627e4077342a92c2746c7aede6cb31eb84184a077946621a8313" Nov 26 00:25:33 crc kubenswrapper[4875]: E1126 00:25:33.612112 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"afc46da5adb9627e4077342a92c2746c7aede6cb31eb84184a077946621a8313\": container with ID starting with afc46da5adb9627e4077342a92c2746c7aede6cb31eb84184a077946621a8313 not found: ID does not exist" containerID="afc46da5adb9627e4077342a92c2746c7aede6cb31eb84184a077946621a8313" Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.612143 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"afc46da5adb9627e4077342a92c2746c7aede6cb31eb84184a077946621a8313"} err="failed to get container status \"afc46da5adb9627e4077342a92c2746c7aede6cb31eb84184a077946621a8313\": rpc error: code = NotFound desc = could not find container \"afc46da5adb9627e4077342a92c2746c7aede6cb31eb84184a077946621a8313\": container with ID starting with afc46da5adb9627e4077342a92c2746c7aede6cb31eb84184a077946621a8313 not found: ID does not exist" Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.612167 4875 scope.go:117] "RemoveContainer" containerID="1f60f9e58c8dee3fe885920a4636678e2b5073eabf43fab7d8a99ff9a61632b5" Nov 26 00:25:33 crc kubenswrapper[4875]: E1126 00:25:33.612392 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1f60f9e58c8dee3fe885920a4636678e2b5073eabf43fab7d8a99ff9a61632b5\": container with ID starting with 1f60f9e58c8dee3fe885920a4636678e2b5073eabf43fab7d8a99ff9a61632b5 not found: ID does not exist" containerID="1f60f9e58c8dee3fe885920a4636678e2b5073eabf43fab7d8a99ff9a61632b5" Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.612452 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1f60f9e58c8dee3fe885920a4636678e2b5073eabf43fab7d8a99ff9a61632b5"} err="failed to get container status \"1f60f9e58c8dee3fe885920a4636678e2b5073eabf43fab7d8a99ff9a61632b5\": rpc error: code = NotFound desc = could not find container 
\"1f60f9e58c8dee3fe885920a4636678e2b5073eabf43fab7d8a99ff9a61632b5\": container with ID starting with 1f60f9e58c8dee3fe885920a4636678e2b5073eabf43fab7d8a99ff9a61632b5 not found: ID does not exist" Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.646361 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/5c6db9b3-ee71-4956-9091-5b8ff94acefc-container-storage-root" (OuterVolumeSpecName: "container-storage-root") pod "5c6db9b3-ee71-4956-9091-5b8ff94acefc" (UID: "5c6db9b3-ee71-4956-9091-5b8ff94acefc"). InnerVolumeSpecName "container-storage-root". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.676632 4875 reconciler_common.go:293] "Volume detached for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/5c6db9b3-ee71-4956-9091-5b8ff94acefc-container-storage-root\") on node \"crc\" DevicePath \"\"" Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.907239 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["service-telemetry/sg-core-1-build"] Nov 26 00:25:33 crc kubenswrapper[4875]: I1126 00:25:33.912386 4875 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["service-telemetry/sg-core-1-build"] Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.428900 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["service-telemetry/sg-core-2-build"] Nov 26 00:25:34 crc kubenswrapper[4875]: E1126 00:25:34.429251 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c6db9b3-ee71-4956-9091-5b8ff94acefc" containerName="manage-dockerfile" Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.429283 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c6db9b3-ee71-4956-9091-5b8ff94acefc" containerName="manage-dockerfile" Nov 26 00:25:34 crc kubenswrapper[4875]: E1126 00:25:34.429311 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="5c6db9b3-ee71-4956-9091-5b8ff94acefc" containerName="docker-build" Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.429324 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="5c6db9b3-ee71-4956-9091-5b8ff94acefc" containerName="docker-build" Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.429538 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="5c6db9b3-ee71-4956-9091-5b8ff94acefc" containerName="docker-build" Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.430880 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="service-telemetry/sg-core-2-build" Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.432919 4875 reflector.go:368] Caches populated for *v1.Secret from object-"service-telemetry"/"builder-dockercfg-5d5px" Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.432947 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"service-telemetry"/"sg-core-2-sys-config" Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.433598 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"service-telemetry"/"sg-core-2-global-ca" Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.434748 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"service-telemetry"/"sg-core-2-ca" Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.447302 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["service-telemetry/sg-core-2-build"] Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.589347 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/087c98d0-b335-4596-9d27-c77d207bcda9-container-storage-run\") pod \"sg-core-2-build\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " pod="service-telemetry/sg-core-2-build" Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.589423 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/087c98d0-b335-4596-9d27-c77d207bcda9-container-storage-root\") pod \"sg-core-2-build\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " pod="service-telemetry/sg-core-2-build" Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.589443 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/087c98d0-b335-4596-9d27-c77d207bcda9-build-blob-cache\") pod \"sg-core-2-build\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " pod="service-telemetry/sg-core-2-build" Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.589463 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/087c98d0-b335-4596-9d27-c77d207bcda9-buildcachedir\") pod \"sg-core-2-build\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " pod="service-telemetry/sg-core-2-build" Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.589552 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/087c98d0-b335-4596-9d27-c77d207bcda9-builder-dockercfg-5d5px-push\") pod \"sg-core-2-build\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " pod="service-telemetry/sg-core-2-build" Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.589682 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/087c98d0-b335-4596-9d27-c77d207bcda9-builder-dockercfg-5d5px-pull\") pod \"sg-core-2-build\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " pod="service-telemetry/sg-core-2-build" Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.589725 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: 
\"kubernetes.io/host-path/087c98d0-b335-4596-9d27-c77d207bcda9-node-pullsecrets\") pod \"sg-core-2-build\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " pod="service-telemetry/sg-core-2-build" Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.589772 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-d78hn\" (UniqueName: \"kubernetes.io/projected/087c98d0-b335-4596-9d27-c77d207bcda9-kube-api-access-d78hn\") pod \"sg-core-2-build\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " pod="service-telemetry/sg-core-2-build" Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.589824 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/087c98d0-b335-4596-9d27-c77d207bcda9-build-ca-bundles\") pod \"sg-core-2-build\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " pod="service-telemetry/sg-core-2-build" Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.589912 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/087c98d0-b335-4596-9d27-c77d207bcda9-buildworkdir\") pod \"sg-core-2-build\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " pod="service-telemetry/sg-core-2-build" Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.589954 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/087c98d0-b335-4596-9d27-c77d207bcda9-build-system-configs\") pod \"sg-core-2-build\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " pod="service-telemetry/sg-core-2-build" Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.589981 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/087c98d0-b335-4596-9d27-c77d207bcda9-build-proxy-ca-bundles\") pod \"sg-core-2-build\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " pod="service-telemetry/sg-core-2-build" Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.690951 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/087c98d0-b335-4596-9d27-c77d207bcda9-container-storage-run\") pod \"sg-core-2-build\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " pod="service-telemetry/sg-core-2-build" Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.691027 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/087c98d0-b335-4596-9d27-c77d207bcda9-container-storage-root\") pod \"sg-core-2-build\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " pod="service-telemetry/sg-core-2-build" Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.691053 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/087c98d0-b335-4596-9d27-c77d207bcda9-build-blob-cache\") pod \"sg-core-2-build\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " pod="service-telemetry/sg-core-2-build" Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.691071 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"buildcachedir\" (UniqueName: 
\"kubernetes.io/host-path/087c98d0-b335-4596-9d27-c77d207bcda9-buildcachedir\") pod \"sg-core-2-build\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " pod="service-telemetry/sg-core-2-build" Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.691101 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/087c98d0-b335-4596-9d27-c77d207bcda9-builder-dockercfg-5d5px-push\") pod \"sg-core-2-build\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " pod="service-telemetry/sg-core-2-build" Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.691124 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/087c98d0-b335-4596-9d27-c77d207bcda9-builder-dockercfg-5d5px-pull\") pod \"sg-core-2-build\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " pod="service-telemetry/sg-core-2-build" Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.691148 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/087c98d0-b335-4596-9d27-c77d207bcda9-node-pullsecrets\") pod \"sg-core-2-build\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " pod="service-telemetry/sg-core-2-build" Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.691170 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-d78hn\" (UniqueName: \"kubernetes.io/projected/087c98d0-b335-4596-9d27-c77d207bcda9-kube-api-access-d78hn\") pod \"sg-core-2-build\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " pod="service-telemetry/sg-core-2-build" Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.691247 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/087c98d0-b335-4596-9d27-c77d207bcda9-buildcachedir\") pod \"sg-core-2-build\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " pod="service-telemetry/sg-core-2-build" Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.691304 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/087c98d0-b335-4596-9d27-c77d207bcda9-node-pullsecrets\") pod \"sg-core-2-build\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " pod="service-telemetry/sg-core-2-build" Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.691332 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/087c98d0-b335-4596-9d27-c77d207bcda9-build-ca-bundles\") pod \"sg-core-2-build\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " pod="service-telemetry/sg-core-2-build" Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.691371 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/087c98d0-b335-4596-9d27-c77d207bcda9-buildworkdir\") pod \"sg-core-2-build\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " pod="service-telemetry/sg-core-2-build" Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.691396 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/087c98d0-b335-4596-9d27-c77d207bcda9-build-system-configs\") pod \"sg-core-2-build\" (UID: 
\"087c98d0-b335-4596-9d27-c77d207bcda9\") " pod="service-telemetry/sg-core-2-build" Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.691726 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/087c98d0-b335-4596-9d27-c77d207bcda9-build-blob-cache\") pod \"sg-core-2-build\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " pod="service-telemetry/sg-core-2-build" Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.691837 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/087c98d0-b335-4596-9d27-c77d207bcda9-container-storage-run\") pod \"sg-core-2-build\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " pod="service-telemetry/sg-core-2-build" Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.691937 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/087c98d0-b335-4596-9d27-c77d207bcda9-container-storage-root\") pod \"sg-core-2-build\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " pod="service-telemetry/sg-core-2-build" Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.691989 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/087c98d0-b335-4596-9d27-c77d207bcda9-build-proxy-ca-bundles\") pod \"sg-core-2-build\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " pod="service-telemetry/sg-core-2-build" Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.692020 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/087c98d0-b335-4596-9d27-c77d207bcda9-buildworkdir\") pod \"sg-core-2-build\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " pod="service-telemetry/sg-core-2-build" Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.692546 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/087c98d0-b335-4596-9d27-c77d207bcda9-build-system-configs\") pod \"sg-core-2-build\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " pod="service-telemetry/sg-core-2-build" Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.692699 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/087c98d0-b335-4596-9d27-c77d207bcda9-build-proxy-ca-bundles\") pod \"sg-core-2-build\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " pod="service-telemetry/sg-core-2-build" Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.692765 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/087c98d0-b335-4596-9d27-c77d207bcda9-build-ca-bundles\") pod \"sg-core-2-build\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " pod="service-telemetry/sg-core-2-build" Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.695919 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/087c98d0-b335-4596-9d27-c77d207bcda9-builder-dockercfg-5d5px-push\") pod \"sg-core-2-build\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " pod="service-telemetry/sg-core-2-build" Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.703593 4875 
operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/087c98d0-b335-4596-9d27-c77d207bcda9-builder-dockercfg-5d5px-pull\") pod \"sg-core-2-build\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " pod="service-telemetry/sg-core-2-build" Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.708823 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-d78hn\" (UniqueName: \"kubernetes.io/projected/087c98d0-b335-4596-9d27-c77d207bcda9-kube-api-access-d78hn\") pod \"sg-core-2-build\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " pod="service-telemetry/sg-core-2-build" Nov 26 00:25:34 crc kubenswrapper[4875]: I1126 00:25:34.749662 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="service-telemetry/sg-core-2-build" Nov 26 00:25:35 crc kubenswrapper[4875]: I1126 00:25:35.147890 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["service-telemetry/sg-core-2-build"] Nov 26 00:25:35 crc kubenswrapper[4875]: I1126 00:25:35.541476 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="5c6db9b3-ee71-4956-9091-5b8ff94acefc" path="/var/lib/kubelet/pods/5c6db9b3-ee71-4956-9091-5b8ff94acefc/volumes" Nov 26 00:25:35 crc kubenswrapper[4875]: I1126 00:25:35.589502 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/sg-core-2-build" event={"ID":"087c98d0-b335-4596-9d27-c77d207bcda9","Type":"ContainerStarted","Data":"cbbe5c50e3faf6eebb4f7bbcb9792ea6dc0bc188eb7567d01cc7ad46e648c5f5"} Nov 26 00:25:35 crc kubenswrapper[4875]: I1126 00:25:35.589561 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/sg-core-2-build" event={"ID":"087c98d0-b335-4596-9d27-c77d207bcda9","Type":"ContainerStarted","Data":"c92f5ae7741a87ab4110fedc9d224418f8d3c89a71da5a6c0ec1487fc05b9ff3"} Nov 26 00:25:36 crc kubenswrapper[4875]: I1126 00:25:36.596707 4875 generic.go:334] "Generic (PLEG): container finished" podID="087c98d0-b335-4596-9d27-c77d207bcda9" containerID="cbbe5c50e3faf6eebb4f7bbcb9792ea6dc0bc188eb7567d01cc7ad46e648c5f5" exitCode=0 Nov 26 00:25:36 crc kubenswrapper[4875]: I1126 00:25:36.596765 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/sg-core-2-build" event={"ID":"087c98d0-b335-4596-9d27-c77d207bcda9","Type":"ContainerDied","Data":"cbbe5c50e3faf6eebb4f7bbcb9792ea6dc0bc188eb7567d01cc7ad46e648c5f5"} Nov 26 00:25:37 crc kubenswrapper[4875]: I1126 00:25:37.511822 4875 patch_prober.go:28] interesting pod/machine-config-daemon-9mztb container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 00:25:37 crc kubenswrapper[4875]: I1126 00:25:37.511893 4875 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 00:25:37 crc kubenswrapper[4875]: I1126 00:25:37.603952 4875 generic.go:334] "Generic (PLEG): container finished" podID="087c98d0-b335-4596-9d27-c77d207bcda9" containerID="8a6881a4292797a92a481c5a9f77aafe9c3bb9249a32871773f80d9f5b303895" exitCode=0 Nov 26 00:25:37 crc kubenswrapper[4875]: I1126 00:25:37.603991 4875 
kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/sg-core-2-build" event={"ID":"087c98d0-b335-4596-9d27-c77d207bcda9","Type":"ContainerDied","Data":"8a6881a4292797a92a481c5a9f77aafe9c3bb9249a32871773f80d9f5b303895"} Nov 26 00:25:37 crc kubenswrapper[4875]: I1126 00:25:37.645391 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/service-telemetry_sg-core-2-build_087c98d0-b335-4596-9d27-c77d207bcda9/manage-dockerfile/0.log" Nov 26 00:25:38 crc kubenswrapper[4875]: I1126 00:25:38.612805 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/sg-core-2-build" event={"ID":"087c98d0-b335-4596-9d27-c77d207bcda9","Type":"ContainerStarted","Data":"3a1d485b1d5a43df016ed69bbe285367a4d6eadef3914effe2e785d9baea576b"} Nov 26 00:25:38 crc kubenswrapper[4875]: I1126 00:25:38.644405 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="service-telemetry/sg-core-2-build" podStartSLOduration=4.644377627 podStartE2EDuration="4.644377627s" podCreationTimestamp="2025-11-26 00:25:34 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:25:38.63968584 +0000 UTC m=+1111.829661535" watchObservedRunningTime="2025-11-26 00:25:38.644377627 +0000 UTC m=+1111.834353322" Nov 26 00:26:07 crc kubenswrapper[4875]: I1126 00:26:07.511870 4875 patch_prober.go:28] interesting pod/machine-config-daemon-9mztb container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 00:26:07 crc kubenswrapper[4875]: I1126 00:26:07.512320 4875 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 00:26:07 crc kubenswrapper[4875]: I1126 00:26:07.512360 4875 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" Nov 26 00:26:07 crc kubenswrapper[4875]: I1126 00:26:07.512942 4875 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"924f375e812a0719e7bc1f7b009dabf2f368d974e2fd9a554bfc2a38ef6023a5"} pod="openshift-machine-config-operator/machine-config-daemon-9mztb" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 00:26:07 crc kubenswrapper[4875]: I1126 00:26:07.513002 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" containerName="machine-config-daemon" containerID="cri-o://924f375e812a0719e7bc1f7b009dabf2f368d974e2fd9a554bfc2a38ef6023a5" gracePeriod=600 Nov 26 00:26:07 crc kubenswrapper[4875]: I1126 00:26:07.788160 4875 generic.go:334] "Generic (PLEG): container finished" podID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" containerID="924f375e812a0719e7bc1f7b009dabf2f368d974e2fd9a554bfc2a38ef6023a5" exitCode=0 Nov 26 00:26:07 crc kubenswrapper[4875]: I1126 00:26:07.788857 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-machine-config-operator/machine-config-daemon-9mztb" event={"ID":"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e","Type":"ContainerDied","Data":"924f375e812a0719e7bc1f7b009dabf2f368d974e2fd9a554bfc2a38ef6023a5"} Nov 26 00:26:07 crc kubenswrapper[4875]: I1126 00:26:07.788968 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" event={"ID":"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e","Type":"ContainerStarted","Data":"9d7963837af3c665aadff4c3b7d21b20d77252b8e2a08db4f64c8184c28bc3b4"} Nov 26 00:26:07 crc kubenswrapper[4875]: I1126 00:26:07.789065 4875 scope.go:117] "RemoveContainer" containerID="46c47342b308d949e370e86db45ad1b54d496a9856e0ad49658508d0aa116ac9" Nov 26 00:28:07 crc kubenswrapper[4875]: I1126 00:28:07.511492 4875 patch_prober.go:28] interesting pod/machine-config-daemon-9mztb container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 00:28:07 crc kubenswrapper[4875]: I1126 00:28:07.512025 4875 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 00:28:35 crc kubenswrapper[4875]: I1126 00:28:35.689258 4875 generic.go:334] "Generic (PLEG): container finished" podID="087c98d0-b335-4596-9d27-c77d207bcda9" containerID="3a1d485b1d5a43df016ed69bbe285367a4d6eadef3914effe2e785d9baea576b" exitCode=0 Nov 26 00:28:35 crc kubenswrapper[4875]: I1126 00:28:35.689331 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/sg-core-2-build" event={"ID":"087c98d0-b335-4596-9d27-c77d207bcda9","Type":"ContainerDied","Data":"3a1d485b1d5a43df016ed69bbe285367a4d6eadef3914effe2e785d9baea576b"} Nov 26 00:28:36 crc kubenswrapper[4875]: I1126 00:28:36.938539 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="service-telemetry/sg-core-2-build" Nov 26 00:28:37 crc kubenswrapper[4875]: I1126 00:28:37.114841 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/087c98d0-b335-4596-9d27-c77d207bcda9-build-ca-bundles\") pod \"087c98d0-b335-4596-9d27-c77d207bcda9\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " Nov 26 00:28:37 crc kubenswrapper[4875]: I1126 00:28:37.114895 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-d78hn\" (UniqueName: \"kubernetes.io/projected/087c98d0-b335-4596-9d27-c77d207bcda9-kube-api-access-d78hn\") pod \"087c98d0-b335-4596-9d27-c77d207bcda9\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " Nov 26 00:28:37 crc kubenswrapper[4875]: I1126 00:28:37.114916 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/087c98d0-b335-4596-9d27-c77d207bcda9-build-system-configs\") pod \"087c98d0-b335-4596-9d27-c77d207bcda9\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " Nov 26 00:28:37 crc kubenswrapper[4875]: I1126 00:28:37.114935 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/087c98d0-b335-4596-9d27-c77d207bcda9-build-proxy-ca-bundles\") pod \"087c98d0-b335-4596-9d27-c77d207bcda9\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " Nov 26 00:28:37 crc kubenswrapper[4875]: I1126 00:28:37.114953 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/087c98d0-b335-4596-9d27-c77d207bcda9-build-blob-cache\") pod \"087c98d0-b335-4596-9d27-c77d207bcda9\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " Nov 26 00:28:37 crc kubenswrapper[4875]: I1126 00:28:37.114967 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/087c98d0-b335-4596-9d27-c77d207bcda9-buildworkdir\") pod \"087c98d0-b335-4596-9d27-c77d207bcda9\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " Nov 26 00:28:37 crc kubenswrapper[4875]: I1126 00:28:37.114995 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/087c98d0-b335-4596-9d27-c77d207bcda9-container-storage-root\") pod \"087c98d0-b335-4596-9d27-c77d207bcda9\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " Nov 26 00:28:37 crc kubenswrapper[4875]: I1126 00:28:37.115013 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/087c98d0-b335-4596-9d27-c77d207bcda9-buildcachedir\") pod \"087c98d0-b335-4596-9d27-c77d207bcda9\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " Nov 26 00:28:37 crc kubenswrapper[4875]: I1126 00:28:37.115032 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/087c98d0-b335-4596-9d27-c77d207bcda9-builder-dockercfg-5d5px-pull\") pod \"087c98d0-b335-4596-9d27-c77d207bcda9\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " Nov 26 00:28:37 crc kubenswrapper[4875]: I1126 00:28:37.115052 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"container-storage-run\" (UniqueName: 
\"kubernetes.io/empty-dir/087c98d0-b335-4596-9d27-c77d207bcda9-container-storage-run\") pod \"087c98d0-b335-4596-9d27-c77d207bcda9\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " Nov 26 00:28:37 crc kubenswrapper[4875]: I1126 00:28:37.115083 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/087c98d0-b335-4596-9d27-c77d207bcda9-node-pullsecrets\") pod \"087c98d0-b335-4596-9d27-c77d207bcda9\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " Nov 26 00:28:37 crc kubenswrapper[4875]: I1126 00:28:37.115101 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/087c98d0-b335-4596-9d27-c77d207bcda9-builder-dockercfg-5d5px-push\") pod \"087c98d0-b335-4596-9d27-c77d207bcda9\" (UID: \"087c98d0-b335-4596-9d27-c77d207bcda9\") " Nov 26 00:28:37 crc kubenswrapper[4875]: I1126 00:28:37.115716 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/087c98d0-b335-4596-9d27-c77d207bcda9-node-pullsecrets" (OuterVolumeSpecName: "node-pullsecrets") pod "087c98d0-b335-4596-9d27-c77d207bcda9" (UID: "087c98d0-b335-4596-9d27-c77d207bcda9"). InnerVolumeSpecName "node-pullsecrets". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:28:37 crc kubenswrapper[4875]: I1126 00:28:37.115795 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/087c98d0-b335-4596-9d27-c77d207bcda9-buildcachedir" (OuterVolumeSpecName: "buildcachedir") pod "087c98d0-b335-4596-9d27-c77d207bcda9" (UID: "087c98d0-b335-4596-9d27-c77d207bcda9"). InnerVolumeSpecName "buildcachedir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:28:37 crc kubenswrapper[4875]: I1126 00:28:37.116439 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/087c98d0-b335-4596-9d27-c77d207bcda9-container-storage-run" (OuterVolumeSpecName: "container-storage-run") pod "087c98d0-b335-4596-9d27-c77d207bcda9" (UID: "087c98d0-b335-4596-9d27-c77d207bcda9"). InnerVolumeSpecName "container-storage-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:28:37 crc kubenswrapper[4875]: I1126 00:28:37.117512 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/087c98d0-b335-4596-9d27-c77d207bcda9-build-ca-bundles" (OuterVolumeSpecName: "build-ca-bundles") pod "087c98d0-b335-4596-9d27-c77d207bcda9" (UID: "087c98d0-b335-4596-9d27-c77d207bcda9"). InnerVolumeSpecName "build-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:28:37 crc kubenswrapper[4875]: I1126 00:28:37.117579 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/087c98d0-b335-4596-9d27-c77d207bcda9-build-proxy-ca-bundles" (OuterVolumeSpecName: "build-proxy-ca-bundles") pod "087c98d0-b335-4596-9d27-c77d207bcda9" (UID: "087c98d0-b335-4596-9d27-c77d207bcda9"). InnerVolumeSpecName "build-proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:28:37 crc kubenswrapper[4875]: I1126 00:28:37.118395 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/087c98d0-b335-4596-9d27-c77d207bcda9-build-system-configs" (OuterVolumeSpecName: "build-system-configs") pod "087c98d0-b335-4596-9d27-c77d207bcda9" (UID: "087c98d0-b335-4596-9d27-c77d207bcda9"). 
InnerVolumeSpecName "build-system-configs". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:28:37 crc kubenswrapper[4875]: I1126 00:28:37.122796 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/087c98d0-b335-4596-9d27-c77d207bcda9-kube-api-access-d78hn" (OuterVolumeSpecName: "kube-api-access-d78hn") pod "087c98d0-b335-4596-9d27-c77d207bcda9" (UID: "087c98d0-b335-4596-9d27-c77d207bcda9"). InnerVolumeSpecName "kube-api-access-d78hn". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:28:37 crc kubenswrapper[4875]: I1126 00:28:37.123338 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/087c98d0-b335-4596-9d27-c77d207bcda9-builder-dockercfg-5d5px-pull" (OuterVolumeSpecName: "builder-dockercfg-5d5px-pull") pod "087c98d0-b335-4596-9d27-c77d207bcda9" (UID: "087c98d0-b335-4596-9d27-c77d207bcda9"). InnerVolumeSpecName "builder-dockercfg-5d5px-pull". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:28:37 crc kubenswrapper[4875]: I1126 00:28:37.123367 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/087c98d0-b335-4596-9d27-c77d207bcda9-builder-dockercfg-5d5px-push" (OuterVolumeSpecName: "builder-dockercfg-5d5px-push") pod "087c98d0-b335-4596-9d27-c77d207bcda9" (UID: "087c98d0-b335-4596-9d27-c77d207bcda9"). InnerVolumeSpecName "builder-dockercfg-5d5px-push". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:28:37 crc kubenswrapper[4875]: I1126 00:28:37.133093 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/087c98d0-b335-4596-9d27-c77d207bcda9-buildworkdir" (OuterVolumeSpecName: "buildworkdir") pod "087c98d0-b335-4596-9d27-c77d207bcda9" (UID: "087c98d0-b335-4596-9d27-c77d207bcda9"). InnerVolumeSpecName "buildworkdir". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:28:37 crc kubenswrapper[4875]: I1126 00:28:37.215740 4875 reconciler_common.go:293] "Volume detached for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/087c98d0-b335-4596-9d27-c77d207bcda9-container-storage-run\") on node \"crc\" DevicePath \"\"" Nov 26 00:28:37 crc kubenswrapper[4875]: I1126 00:28:37.215775 4875 reconciler_common.go:293] "Volume detached for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/087c98d0-b335-4596-9d27-c77d207bcda9-node-pullsecrets\") on node \"crc\" DevicePath \"\"" Nov 26 00:28:37 crc kubenswrapper[4875]: I1126 00:28:37.215784 4875 reconciler_common.go:293] "Volume detached for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/087c98d0-b335-4596-9d27-c77d207bcda9-builder-dockercfg-5d5px-push\") on node \"crc\" DevicePath \"\"" Nov 26 00:28:37 crc kubenswrapper[4875]: I1126 00:28:37.215793 4875 reconciler_common.go:293] "Volume detached for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/087c98d0-b335-4596-9d27-c77d207bcda9-build-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 26 00:28:37 crc kubenswrapper[4875]: I1126 00:28:37.215802 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-d78hn\" (UniqueName: \"kubernetes.io/projected/087c98d0-b335-4596-9d27-c77d207bcda9-kube-api-access-d78hn\") on node \"crc\" DevicePath \"\"" Nov 26 00:28:37 crc kubenswrapper[4875]: I1126 00:28:37.215812 4875 reconciler_common.go:293] "Volume detached for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/087c98d0-b335-4596-9d27-c77d207bcda9-build-system-configs\") on node \"crc\" DevicePath \"\"" Nov 26 00:28:37 crc kubenswrapper[4875]: I1126 00:28:37.215819 4875 reconciler_common.go:293] "Volume detached for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/087c98d0-b335-4596-9d27-c77d207bcda9-build-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 26 00:28:37 crc kubenswrapper[4875]: I1126 00:28:37.215828 4875 reconciler_common.go:293] "Volume detached for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/087c98d0-b335-4596-9d27-c77d207bcda9-buildworkdir\") on node \"crc\" DevicePath \"\"" Nov 26 00:28:37 crc kubenswrapper[4875]: I1126 00:28:37.215837 4875 reconciler_common.go:293] "Volume detached for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/087c98d0-b335-4596-9d27-c77d207bcda9-buildcachedir\") on node \"crc\" DevicePath \"\"" Nov 26 00:28:37 crc kubenswrapper[4875]: I1126 00:28:37.215848 4875 reconciler_common.go:293] "Volume detached for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/087c98d0-b335-4596-9d27-c77d207bcda9-builder-dockercfg-5d5px-pull\") on node \"crc\" DevicePath \"\"" Nov 26 00:28:37 crc kubenswrapper[4875]: I1126 00:28:37.447330 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/087c98d0-b335-4596-9d27-c77d207bcda9-build-blob-cache" (OuterVolumeSpecName: "build-blob-cache") pod "087c98d0-b335-4596-9d27-c77d207bcda9" (UID: "087c98d0-b335-4596-9d27-c77d207bcda9"). InnerVolumeSpecName "build-blob-cache". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:28:37 crc kubenswrapper[4875]: I1126 00:28:37.511955 4875 patch_prober.go:28] interesting pod/machine-config-daemon-9mztb container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 00:28:37 crc kubenswrapper[4875]: I1126 00:28:37.512017 4875 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 00:28:37 crc kubenswrapper[4875]: I1126 00:28:37.518726 4875 reconciler_common.go:293] "Volume detached for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/087c98d0-b335-4596-9d27-c77d207bcda9-build-blob-cache\") on node \"crc\" DevicePath \"\"" Nov 26 00:28:37 crc kubenswrapper[4875]: I1126 00:28:37.704433 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/sg-core-2-build" event={"ID":"087c98d0-b335-4596-9d27-c77d207bcda9","Type":"ContainerDied","Data":"c92f5ae7741a87ab4110fedc9d224418f8d3c89a71da5a6c0ec1487fc05b9ff3"} Nov 26 00:28:37 crc kubenswrapper[4875]: I1126 00:28:37.704465 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="service-telemetry/sg-core-2-build" Nov 26 00:28:37 crc kubenswrapper[4875]: I1126 00:28:37.704480 4875 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="c92f5ae7741a87ab4110fedc9d224418f8d3c89a71da5a6c0ec1487fc05b9ff3" Nov 26 00:28:39 crc kubenswrapper[4875]: I1126 00:28:39.319016 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/087c98d0-b335-4596-9d27-c77d207bcda9-container-storage-root" (OuterVolumeSpecName: "container-storage-root") pod "087c98d0-b335-4596-9d27-c77d207bcda9" (UID: "087c98d0-b335-4596-9d27-c77d207bcda9"). InnerVolumeSpecName "container-storage-root". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:28:39 crc kubenswrapper[4875]: I1126 00:28:39.341278 4875 reconciler_common.go:293] "Volume detached for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/087c98d0-b335-4596-9d27-c77d207bcda9-container-storage-root\") on node \"crc\" DevicePath \"\"" Nov 26 00:28:41 crc kubenswrapper[4875]: I1126 00:28:41.967382 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["service-telemetry/sg-bridge-1-build"] Nov 26 00:28:41 crc kubenswrapper[4875]: E1126 00:28:41.967737 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="087c98d0-b335-4596-9d27-c77d207bcda9" containerName="git-clone" Nov 26 00:28:41 crc kubenswrapper[4875]: I1126 00:28:41.967752 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="087c98d0-b335-4596-9d27-c77d207bcda9" containerName="git-clone" Nov 26 00:28:41 crc kubenswrapper[4875]: E1126 00:28:41.967768 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="087c98d0-b335-4596-9d27-c77d207bcda9" containerName="manage-dockerfile" Nov 26 00:28:41 crc kubenswrapper[4875]: I1126 00:28:41.967777 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="087c98d0-b335-4596-9d27-c77d207bcda9" containerName="manage-dockerfile" Nov 26 00:28:41 crc kubenswrapper[4875]: E1126 00:28:41.967800 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="087c98d0-b335-4596-9d27-c77d207bcda9" containerName="docker-build" Nov 26 00:28:41 crc kubenswrapper[4875]: I1126 00:28:41.967806 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="087c98d0-b335-4596-9d27-c77d207bcda9" containerName="docker-build" Nov 26 00:28:41 crc kubenswrapper[4875]: I1126 00:28:41.967937 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="087c98d0-b335-4596-9d27-c77d207bcda9" containerName="docker-build" Nov 26 00:28:41 crc kubenswrapper[4875]: I1126 00:28:41.968872 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="service-telemetry/sg-bridge-1-build" Nov 26 00:28:41 crc kubenswrapper[4875]: I1126 00:28:41.971083 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/dfdf3163-9935-424d-a190-789cf2321c76-build-system-configs\") pod \"sg-bridge-1-build\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " pod="service-telemetry/sg-bridge-1-build" Nov 26 00:28:41 crc kubenswrapper[4875]: I1126 00:28:41.971095 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"service-telemetry"/"sg-bridge-1-ca" Nov 26 00:28:41 crc kubenswrapper[4875]: I1126 00:28:41.971172 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/dfdf3163-9935-424d-a190-789cf2321c76-buildworkdir\") pod \"sg-bridge-1-build\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " pod="service-telemetry/sg-bridge-1-build" Nov 26 00:28:41 crc kubenswrapper[4875]: I1126 00:28:41.971183 4875 reflector.go:368] Caches populated for *v1.Secret from object-"service-telemetry"/"builder-dockercfg-5d5px" Nov 26 00:28:41 crc kubenswrapper[4875]: I1126 00:28:41.971213 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/dfdf3163-9935-424d-a190-789cf2321c76-build-proxy-ca-bundles\") pod \"sg-bridge-1-build\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " pod="service-telemetry/sg-bridge-1-build" Nov 26 00:28:41 crc kubenswrapper[4875]: I1126 00:28:41.971238 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/dfdf3163-9935-424d-a190-789cf2321c76-container-storage-root\") pod \"sg-bridge-1-build\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " pod="service-telemetry/sg-bridge-1-build" Nov 26 00:28:41 crc kubenswrapper[4875]: I1126 00:28:41.971266 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-fjhqs\" (UniqueName: \"kubernetes.io/projected/dfdf3163-9935-424d-a190-789cf2321c76-kube-api-access-fjhqs\") pod \"sg-bridge-1-build\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " pod="service-telemetry/sg-bridge-1-build" Nov 26 00:28:41 crc kubenswrapper[4875]: I1126 00:28:41.971112 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"service-telemetry"/"sg-bridge-1-global-ca" Nov 26 00:28:41 crc kubenswrapper[4875]: I1126 00:28:41.971325 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/dfdf3163-9935-424d-a190-789cf2321c76-build-blob-cache\") pod \"sg-bridge-1-build\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " pod="service-telemetry/sg-bridge-1-build" Nov 26 00:28:41 crc kubenswrapper[4875]: I1126 00:28:41.971498 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/dfdf3163-9935-424d-a190-789cf2321c76-node-pullsecrets\") pod \"sg-bridge-1-build\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " pod="service-telemetry/sg-bridge-1-build" Nov 26 00:28:41 crc kubenswrapper[4875]: I1126 00:28:41.971579 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume 
started for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/dfdf3163-9935-424d-a190-789cf2321c76-builder-dockercfg-5d5px-push\") pod \"sg-bridge-1-build\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " pod="service-telemetry/sg-bridge-1-build" Nov 26 00:28:41 crc kubenswrapper[4875]: I1126 00:28:41.971612 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/dfdf3163-9935-424d-a190-789cf2321c76-builder-dockercfg-5d5px-pull\") pod \"sg-bridge-1-build\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " pod="service-telemetry/sg-bridge-1-build" Nov 26 00:28:41 crc kubenswrapper[4875]: I1126 00:28:41.971681 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/dfdf3163-9935-424d-a190-789cf2321c76-build-ca-bundles\") pod \"sg-bridge-1-build\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " pod="service-telemetry/sg-bridge-1-build" Nov 26 00:28:41 crc kubenswrapper[4875]: I1126 00:28:41.971703 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/dfdf3163-9935-424d-a190-789cf2321c76-container-storage-run\") pod \"sg-bridge-1-build\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " pod="service-telemetry/sg-bridge-1-build" Nov 26 00:28:41 crc kubenswrapper[4875]: I1126 00:28:41.971736 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/dfdf3163-9935-424d-a190-789cf2321c76-buildcachedir\") pod \"sg-bridge-1-build\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " pod="service-telemetry/sg-bridge-1-build" Nov 26 00:28:41 crc kubenswrapper[4875]: I1126 00:28:41.971848 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"service-telemetry"/"sg-bridge-1-sys-config" Nov 26 00:28:41 crc kubenswrapper[4875]: I1126 00:28:41.979800 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["service-telemetry/sg-bridge-1-build"] Nov 26 00:28:42 crc kubenswrapper[4875]: I1126 00:28:42.073344 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/dfdf3163-9935-424d-a190-789cf2321c76-build-ca-bundles\") pod \"sg-bridge-1-build\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " pod="service-telemetry/sg-bridge-1-build" Nov 26 00:28:42 crc kubenswrapper[4875]: I1126 00:28:42.073469 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/dfdf3163-9935-424d-a190-789cf2321c76-container-storage-run\") pod \"sg-bridge-1-build\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " pod="service-telemetry/sg-bridge-1-build" Nov 26 00:28:42 crc kubenswrapper[4875]: I1126 00:28:42.073516 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/dfdf3163-9935-424d-a190-789cf2321c76-buildcachedir\") pod \"sg-bridge-1-build\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " pod="service-telemetry/sg-bridge-1-build" Nov 26 00:28:42 crc kubenswrapper[4875]: I1126 00:28:42.073572 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"build-system-configs\" (UniqueName: 
\"kubernetes.io/configmap/dfdf3163-9935-424d-a190-789cf2321c76-build-system-configs\") pod \"sg-bridge-1-build\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " pod="service-telemetry/sg-bridge-1-build" Nov 26 00:28:42 crc kubenswrapper[4875]: I1126 00:28:42.073618 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/dfdf3163-9935-424d-a190-789cf2321c76-buildworkdir\") pod \"sg-bridge-1-build\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " pod="service-telemetry/sg-bridge-1-build" Nov 26 00:28:42 crc kubenswrapper[4875]: I1126 00:28:42.073661 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/dfdf3163-9935-424d-a190-789cf2321c76-build-proxy-ca-bundles\") pod \"sg-bridge-1-build\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " pod="service-telemetry/sg-bridge-1-build" Nov 26 00:28:42 crc kubenswrapper[4875]: I1126 00:28:42.073693 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/dfdf3163-9935-424d-a190-789cf2321c76-container-storage-root\") pod \"sg-bridge-1-build\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " pod="service-telemetry/sg-bridge-1-build" Nov 26 00:28:42 crc kubenswrapper[4875]: I1126 00:28:42.073727 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-fjhqs\" (UniqueName: \"kubernetes.io/projected/dfdf3163-9935-424d-a190-789cf2321c76-kube-api-access-fjhqs\") pod \"sg-bridge-1-build\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " pod="service-telemetry/sg-bridge-1-build" Nov 26 00:28:42 crc kubenswrapper[4875]: I1126 00:28:42.073762 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/dfdf3163-9935-424d-a190-789cf2321c76-build-blob-cache\") pod \"sg-bridge-1-build\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " pod="service-telemetry/sg-bridge-1-build" Nov 26 00:28:42 crc kubenswrapper[4875]: I1126 00:28:42.073830 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/dfdf3163-9935-424d-a190-789cf2321c76-node-pullsecrets\") pod \"sg-bridge-1-build\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " pod="service-telemetry/sg-bridge-1-build" Nov 26 00:28:42 crc kubenswrapper[4875]: I1126 00:28:42.073886 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/dfdf3163-9935-424d-a190-789cf2321c76-builder-dockercfg-5d5px-pull\") pod \"sg-bridge-1-build\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " pod="service-telemetry/sg-bridge-1-build" Nov 26 00:28:42 crc kubenswrapper[4875]: I1126 00:28:42.073925 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/dfdf3163-9935-424d-a190-789cf2321c76-builder-dockercfg-5d5px-push\") pod \"sg-bridge-1-build\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " pod="service-telemetry/sg-bridge-1-build" Nov 26 00:28:42 crc kubenswrapper[4875]: I1126 00:28:42.074295 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/dfdf3163-9935-424d-a190-789cf2321c76-buildcachedir\") 
pod \"sg-bridge-1-build\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " pod="service-telemetry/sg-bridge-1-build" Nov 26 00:28:42 crc kubenswrapper[4875]: I1126 00:28:42.074612 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/dfdf3163-9935-424d-a190-789cf2321c76-node-pullsecrets\") pod \"sg-bridge-1-build\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " pod="service-telemetry/sg-bridge-1-build" Nov 26 00:28:42 crc kubenswrapper[4875]: I1126 00:28:42.075183 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/dfdf3163-9935-424d-a190-789cf2321c76-container-storage-root\") pod \"sg-bridge-1-build\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " pod="service-telemetry/sg-bridge-1-build" Nov 26 00:28:42 crc kubenswrapper[4875]: I1126 00:28:42.075202 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/dfdf3163-9935-424d-a190-789cf2321c76-build-system-configs\") pod \"sg-bridge-1-build\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " pod="service-telemetry/sg-bridge-1-build" Nov 26 00:28:42 crc kubenswrapper[4875]: I1126 00:28:42.075236 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/dfdf3163-9935-424d-a190-789cf2321c76-container-storage-run\") pod \"sg-bridge-1-build\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " pod="service-telemetry/sg-bridge-1-build" Nov 26 00:28:42 crc kubenswrapper[4875]: I1126 00:28:42.075186 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/dfdf3163-9935-424d-a190-789cf2321c76-build-ca-bundles\") pod \"sg-bridge-1-build\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " pod="service-telemetry/sg-bridge-1-build" Nov 26 00:28:42 crc kubenswrapper[4875]: I1126 00:28:42.075271 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/dfdf3163-9935-424d-a190-789cf2321c76-build-blob-cache\") pod \"sg-bridge-1-build\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " pod="service-telemetry/sg-bridge-1-build" Nov 26 00:28:42 crc kubenswrapper[4875]: I1126 00:28:42.075328 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/dfdf3163-9935-424d-a190-789cf2321c76-buildworkdir\") pod \"sg-bridge-1-build\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " pod="service-telemetry/sg-bridge-1-build" Nov 26 00:28:42 crc kubenswrapper[4875]: I1126 00:28:42.075676 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/dfdf3163-9935-424d-a190-789cf2321c76-build-proxy-ca-bundles\") pod \"sg-bridge-1-build\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " pod="service-telemetry/sg-bridge-1-build" Nov 26 00:28:42 crc kubenswrapper[4875]: I1126 00:28:42.093330 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/dfdf3163-9935-424d-a190-789cf2321c76-builder-dockercfg-5d5px-push\") pod \"sg-bridge-1-build\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " pod="service-telemetry/sg-bridge-1-build" Nov 26 00:28:42 crc kubenswrapper[4875]: 
I1126 00:28:42.093488 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/dfdf3163-9935-424d-a190-789cf2321c76-builder-dockercfg-5d5px-pull\") pod \"sg-bridge-1-build\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " pod="service-telemetry/sg-bridge-1-build" Nov 26 00:28:42 crc kubenswrapper[4875]: I1126 00:28:42.097881 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-fjhqs\" (UniqueName: \"kubernetes.io/projected/dfdf3163-9935-424d-a190-789cf2321c76-kube-api-access-fjhqs\") pod \"sg-bridge-1-build\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " pod="service-telemetry/sg-bridge-1-build" Nov 26 00:28:42 crc kubenswrapper[4875]: I1126 00:28:42.285837 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="service-telemetry/sg-bridge-1-build" Nov 26 00:28:43 crc kubenswrapper[4875]: I1126 00:28:43.233105 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["service-telemetry/sg-bridge-1-build"] Nov 26 00:28:43 crc kubenswrapper[4875]: I1126 00:28:43.743091 4875 generic.go:334] "Generic (PLEG): container finished" podID="dfdf3163-9935-424d-a190-789cf2321c76" containerID="fbd93bc01d32a9c4f6a2d60e409d60bb12bb11b093e8dd30d0eef22a59412ba0" exitCode=0 Nov 26 00:28:43 crc kubenswrapper[4875]: I1126 00:28:43.743211 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/sg-bridge-1-build" event={"ID":"dfdf3163-9935-424d-a190-789cf2321c76","Type":"ContainerDied","Data":"fbd93bc01d32a9c4f6a2d60e409d60bb12bb11b093e8dd30d0eef22a59412ba0"} Nov 26 00:28:43 crc kubenswrapper[4875]: I1126 00:28:43.743439 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/sg-bridge-1-build" event={"ID":"dfdf3163-9935-424d-a190-789cf2321c76","Type":"ContainerStarted","Data":"5ab9df0e8045c66ae649dd2e4431a3a23ac027634e0d9ecd58151e8cf888f53c"} Nov 26 00:28:44 crc kubenswrapper[4875]: I1126 00:28:44.758243 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/sg-bridge-1-build" event={"ID":"dfdf3163-9935-424d-a190-789cf2321c76","Type":"ContainerStarted","Data":"84c96ccb6b5a90259518349acd0c666c4f57bb52aaf4ab867479660aecaea788"} Nov 26 00:28:44 crc kubenswrapper[4875]: I1126 00:28:44.785851 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="service-telemetry/sg-bridge-1-build" podStartSLOduration=3.785616518 podStartE2EDuration="3.785616518s" podCreationTimestamp="2025-11-26 00:28:41 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:28:44.783625074 +0000 UTC m=+1297.973600769" watchObservedRunningTime="2025-11-26 00:28:44.785616518 +0000 UTC m=+1297.975592223" Nov 26 00:28:50 crc kubenswrapper[4875]: I1126 00:28:50.792127 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/service-telemetry_sg-bridge-1-build_dfdf3163-9935-424d-a190-789cf2321c76/docker-build/0.log" Nov 26 00:28:50 crc kubenswrapper[4875]: I1126 00:28:50.793528 4875 generic.go:334] "Generic (PLEG): container finished" podID="dfdf3163-9935-424d-a190-789cf2321c76" containerID="84c96ccb6b5a90259518349acd0c666c4f57bb52aaf4ab867479660aecaea788" exitCode=1 Nov 26 00:28:50 crc kubenswrapper[4875]: I1126 00:28:50.793609 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/sg-bridge-1-build" 
event={"ID":"dfdf3163-9935-424d-a190-789cf2321c76","Type":"ContainerDied","Data":"84c96ccb6b5a90259518349acd0c666c4f57bb52aaf4ab867479660aecaea788"} Nov 26 00:28:51 crc kubenswrapper[4875]: I1126 00:28:51.992664 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/service-telemetry_sg-bridge-1-build_dfdf3163-9935-424d-a190-789cf2321c76/docker-build/0.log" Nov 26 00:28:51 crc kubenswrapper[4875]: I1126 00:28:51.993172 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="service-telemetry/sg-bridge-1-build" Nov 26 00:28:52 crc kubenswrapper[4875]: I1126 00:28:52.105887 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/dfdf3163-9935-424d-a190-789cf2321c76-build-proxy-ca-bundles\") pod \"dfdf3163-9935-424d-a190-789cf2321c76\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " Nov 26 00:28:52 crc kubenswrapper[4875]: I1126 00:28:52.106446 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/dfdf3163-9935-424d-a190-789cf2321c76-builder-dockercfg-5d5px-pull\") pod \"dfdf3163-9935-424d-a190-789cf2321c76\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " Nov 26 00:28:52 crc kubenswrapper[4875]: I1126 00:28:52.106583 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/dfdf3163-9935-424d-a190-789cf2321c76-build-blob-cache\") pod \"dfdf3163-9935-424d-a190-789cf2321c76\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " Nov 26 00:28:52 crc kubenswrapper[4875]: I1126 00:28:52.106693 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/dfdf3163-9935-424d-a190-789cf2321c76-build-system-configs\") pod \"dfdf3163-9935-424d-a190-789cf2321c76\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " Nov 26 00:28:52 crc kubenswrapper[4875]: I1126 00:28:52.106510 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dfdf3163-9935-424d-a190-789cf2321c76-build-proxy-ca-bundles" (OuterVolumeSpecName: "build-proxy-ca-bundles") pod "dfdf3163-9935-424d-a190-789cf2321c76" (UID: "dfdf3163-9935-424d-a190-789cf2321c76"). InnerVolumeSpecName "build-proxy-ca-bundles". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:28:52 crc kubenswrapper[4875]: I1126 00:28:52.106953 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/dfdf3163-9935-424d-a190-789cf2321c76-node-pullsecrets\") pod \"dfdf3163-9935-424d-a190-789cf2321c76\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " Nov 26 00:28:52 crc kubenswrapper[4875]: I1126 00:28:52.107134 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/dfdf3163-9935-424d-a190-789cf2321c76-buildworkdir\") pod \"dfdf3163-9935-424d-a190-789cf2321c76\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " Nov 26 00:28:52 crc kubenswrapper[4875]: I1126 00:28:52.107281 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-fjhqs\" (UniqueName: \"kubernetes.io/projected/dfdf3163-9935-424d-a190-789cf2321c76-kube-api-access-fjhqs\") pod \"dfdf3163-9935-424d-a190-789cf2321c76\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " Nov 26 00:28:52 crc kubenswrapper[4875]: I1126 00:28:52.107083 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/dfdf3163-9935-424d-a190-789cf2321c76-node-pullsecrets" (OuterVolumeSpecName: "node-pullsecrets") pod "dfdf3163-9935-424d-a190-789cf2321c76" (UID: "dfdf3163-9935-424d-a190-789cf2321c76"). InnerVolumeSpecName "node-pullsecrets". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:28:52 crc kubenswrapper[4875]: I1126 00:28:52.107172 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dfdf3163-9935-424d-a190-789cf2321c76-build-system-configs" (OuterVolumeSpecName: "build-system-configs") pod "dfdf3163-9935-424d-a190-789cf2321c76" (UID: "dfdf3163-9935-424d-a190-789cf2321c76"). InnerVolumeSpecName "build-system-configs". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:28:52 crc kubenswrapper[4875]: I1126 00:28:52.107555 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dfdf3163-9935-424d-a190-789cf2321c76-buildworkdir" (OuterVolumeSpecName: "buildworkdir") pod "dfdf3163-9935-424d-a190-789cf2321c76" (UID: "dfdf3163-9935-424d-a190-789cf2321c76"). InnerVolumeSpecName "buildworkdir". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:28:52 crc kubenswrapper[4875]: I1126 00:28:52.107754 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/dfdf3163-9935-424d-a190-789cf2321c76-container-storage-root\") pod \"dfdf3163-9935-424d-a190-789cf2321c76\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " Nov 26 00:28:52 crc kubenswrapper[4875]: I1126 00:28:52.107849 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/dfdf3163-9935-424d-a190-789cf2321c76-container-storage-run\") pod \"dfdf3163-9935-424d-a190-789cf2321c76\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " Nov 26 00:28:52 crc kubenswrapper[4875]: I1126 00:28:52.107877 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/dfdf3163-9935-424d-a190-789cf2321c76-build-ca-bundles\") pod \"dfdf3163-9935-424d-a190-789cf2321c76\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " Nov 26 00:28:52 crc kubenswrapper[4875]: I1126 00:28:52.107901 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/dfdf3163-9935-424d-a190-789cf2321c76-buildcachedir\") pod \"dfdf3163-9935-424d-a190-789cf2321c76\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " Nov 26 00:28:52 crc kubenswrapper[4875]: I1126 00:28:52.107931 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/dfdf3163-9935-424d-a190-789cf2321c76-builder-dockercfg-5d5px-push\") pod \"dfdf3163-9935-424d-a190-789cf2321c76\" (UID: \"dfdf3163-9935-424d-a190-789cf2321c76\") " Nov 26 00:28:52 crc kubenswrapper[4875]: I1126 00:28:52.108007 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/dfdf3163-9935-424d-a190-789cf2321c76-buildcachedir" (OuterVolumeSpecName: "buildcachedir") pod "dfdf3163-9935-424d-a190-789cf2321c76" (UID: "dfdf3163-9935-424d-a190-789cf2321c76"). InnerVolumeSpecName "buildcachedir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:28:52 crc kubenswrapper[4875]: I1126 00:28:52.108238 4875 reconciler_common.go:293] "Volume detached for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/dfdf3163-9935-424d-a190-789cf2321c76-buildcachedir\") on node \"crc\" DevicePath \"\"" Nov 26 00:28:52 crc kubenswrapper[4875]: I1126 00:28:52.108258 4875 reconciler_common.go:293] "Volume detached for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/dfdf3163-9935-424d-a190-789cf2321c76-build-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 26 00:28:52 crc kubenswrapper[4875]: I1126 00:28:52.108272 4875 reconciler_common.go:293] "Volume detached for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/dfdf3163-9935-424d-a190-789cf2321c76-build-system-configs\") on node \"crc\" DevicePath \"\"" Nov 26 00:28:52 crc kubenswrapper[4875]: I1126 00:28:52.108284 4875 reconciler_common.go:293] "Volume detached for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/dfdf3163-9935-424d-a190-789cf2321c76-node-pullsecrets\") on node \"crc\" DevicePath \"\"" Nov 26 00:28:52 crc kubenswrapper[4875]: I1126 00:28:52.108306 4875 reconciler_common.go:293] "Volume detached for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/dfdf3163-9935-424d-a190-789cf2321c76-buildworkdir\") on node \"crc\" DevicePath \"\"" Nov 26 00:28:52 crc kubenswrapper[4875]: I1126 00:28:52.108461 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/dfdf3163-9935-424d-a190-789cf2321c76-build-ca-bundles" (OuterVolumeSpecName: "build-ca-bundles") pod "dfdf3163-9935-424d-a190-789cf2321c76" (UID: "dfdf3163-9935-424d-a190-789cf2321c76"). InnerVolumeSpecName "build-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:28:52 crc kubenswrapper[4875]: I1126 00:28:52.108895 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dfdf3163-9935-424d-a190-789cf2321c76-container-storage-run" (OuterVolumeSpecName: "container-storage-run") pod "dfdf3163-9935-424d-a190-789cf2321c76" (UID: "dfdf3163-9935-424d-a190-789cf2321c76"). InnerVolumeSpecName "container-storage-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:28:52 crc kubenswrapper[4875]: I1126 00:28:52.112172 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dfdf3163-9935-424d-a190-789cf2321c76-kube-api-access-fjhqs" (OuterVolumeSpecName: "kube-api-access-fjhqs") pod "dfdf3163-9935-424d-a190-789cf2321c76" (UID: "dfdf3163-9935-424d-a190-789cf2321c76"). InnerVolumeSpecName "kube-api-access-fjhqs". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:28:52 crc kubenswrapper[4875]: I1126 00:28:52.112381 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dfdf3163-9935-424d-a190-789cf2321c76-builder-dockercfg-5d5px-push" (OuterVolumeSpecName: "builder-dockercfg-5d5px-push") pod "dfdf3163-9935-424d-a190-789cf2321c76" (UID: "dfdf3163-9935-424d-a190-789cf2321c76"). InnerVolumeSpecName "builder-dockercfg-5d5px-push". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:28:52 crc kubenswrapper[4875]: I1126 00:28:52.114789 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/dfdf3163-9935-424d-a190-789cf2321c76-builder-dockercfg-5d5px-pull" (OuterVolumeSpecName: "builder-dockercfg-5d5px-pull") pod "dfdf3163-9935-424d-a190-789cf2321c76" (UID: "dfdf3163-9935-424d-a190-789cf2321c76"). InnerVolumeSpecName "builder-dockercfg-5d5px-pull". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:28:52 crc kubenswrapper[4875]: I1126 00:28:52.163982 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dfdf3163-9935-424d-a190-789cf2321c76-build-blob-cache" (OuterVolumeSpecName: "build-blob-cache") pod "dfdf3163-9935-424d-a190-789cf2321c76" (UID: "dfdf3163-9935-424d-a190-789cf2321c76"). InnerVolumeSpecName "build-blob-cache". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:28:52 crc kubenswrapper[4875]: I1126 00:28:52.209240 4875 reconciler_common.go:293] "Volume detached for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/dfdf3163-9935-424d-a190-789cf2321c76-build-blob-cache\") on node \"crc\" DevicePath \"\"" Nov 26 00:28:52 crc kubenswrapper[4875]: I1126 00:28:52.209270 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-fjhqs\" (UniqueName: \"kubernetes.io/projected/dfdf3163-9935-424d-a190-789cf2321c76-kube-api-access-fjhqs\") on node \"crc\" DevicePath \"\"" Nov 26 00:28:52 crc kubenswrapper[4875]: I1126 00:28:52.209279 4875 reconciler_common.go:293] "Volume detached for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/dfdf3163-9935-424d-a190-789cf2321c76-container-storage-run\") on node \"crc\" DevicePath \"\"" Nov 26 00:28:52 crc kubenswrapper[4875]: I1126 00:28:52.209289 4875 reconciler_common.go:293] "Volume detached for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/dfdf3163-9935-424d-a190-789cf2321c76-build-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 26 00:28:52 crc kubenswrapper[4875]: I1126 00:28:52.209331 4875 reconciler_common.go:293] "Volume detached for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/dfdf3163-9935-424d-a190-789cf2321c76-builder-dockercfg-5d5px-push\") on node \"crc\" DevicePath \"\"" Nov 26 00:28:52 crc kubenswrapper[4875]: I1126 00:28:52.209339 4875 reconciler_common.go:293] "Volume detached for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/dfdf3163-9935-424d-a190-789cf2321c76-builder-dockercfg-5d5px-pull\") on node \"crc\" DevicePath \"\"" Nov 26 00:28:52 crc kubenswrapper[4875]: I1126 00:28:52.390945 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["service-telemetry/sg-bridge-1-build"] Nov 26 00:28:52 crc kubenswrapper[4875]: I1126 00:28:52.395951 4875 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["service-telemetry/sg-bridge-1-build"] Nov 26 00:28:52 crc kubenswrapper[4875]: I1126 00:28:52.460241 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dfdf3163-9935-424d-a190-789cf2321c76-container-storage-root" (OuterVolumeSpecName: "container-storage-root") pod "dfdf3163-9935-424d-a190-789cf2321c76" (UID: "dfdf3163-9935-424d-a190-789cf2321c76"). InnerVolumeSpecName "container-storage-root". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:28:52 crc kubenswrapper[4875]: I1126 00:28:52.512200 4875 reconciler_common.go:293] "Volume detached for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/dfdf3163-9935-424d-a190-789cf2321c76-container-storage-root\") on node \"crc\" DevicePath \"\"" Nov 26 00:28:52 crc kubenswrapper[4875]: I1126 00:28:52.805508 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/service-telemetry_sg-bridge-1-build_dfdf3163-9935-424d-a190-789cf2321c76/docker-build/0.log" Nov 26 00:28:52 crc kubenswrapper[4875]: I1126 00:28:52.805920 4875 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5ab9df0e8045c66ae649dd2e4431a3a23ac027634e0d9ecd58151e8cf888f53c" Nov 26 00:28:52 crc kubenswrapper[4875]: I1126 00:28:52.805955 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="service-telemetry/sg-bridge-1-build" Nov 26 00:28:53 crc kubenswrapper[4875]: I1126 00:28:53.536753 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dfdf3163-9935-424d-a190-789cf2321c76" path="/var/lib/kubelet/pods/dfdf3163-9935-424d-a190-789cf2321c76/volumes" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.419448 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["service-telemetry/sg-bridge-2-build"] Nov 26 00:28:54 crc kubenswrapper[4875]: E1126 00:28:54.420019 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dfdf3163-9935-424d-a190-789cf2321c76" containerName="docker-build" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.420035 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="dfdf3163-9935-424d-a190-789cf2321c76" containerName="docker-build" Nov 26 00:28:54 crc kubenswrapper[4875]: E1126 00:28:54.420055 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="dfdf3163-9935-424d-a190-789cf2321c76" containerName="manage-dockerfile" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.420064 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="dfdf3163-9935-424d-a190-789cf2321c76" containerName="manage-dockerfile" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.420191 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="dfdf3163-9935-424d-a190-789cf2321c76" containerName="docker-build" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.421195 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="service-telemetry/sg-bridge-2-build" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.423884 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"service-telemetry"/"sg-bridge-2-sys-config" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.427284 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"service-telemetry"/"sg-bridge-2-ca" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.427385 4875 reflector.go:368] Caches populated for *v1.Secret from object-"service-telemetry"/"builder-dockercfg-5d5px" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.427613 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"service-telemetry"/"sg-bridge-2-global-ca" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.435576 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xgrxb\" (UniqueName: \"kubernetes.io/projected/839dfedb-9e62-4193-90ed-46e0adafc755-kube-api-access-xgrxb\") pod \"sg-bridge-2-build\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " pod="service-telemetry/sg-bridge-2-build" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.435620 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/839dfedb-9e62-4193-90ed-46e0adafc755-builder-dockercfg-5d5px-push\") pod \"sg-bridge-2-build\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " pod="service-telemetry/sg-bridge-2-build" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.435649 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/839dfedb-9e62-4193-90ed-46e0adafc755-builder-dockercfg-5d5px-pull\") pod \"sg-bridge-2-build\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " pod="service-telemetry/sg-bridge-2-build" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.435673 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/839dfedb-9e62-4193-90ed-46e0adafc755-build-ca-bundles\") pod \"sg-bridge-2-build\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " pod="service-telemetry/sg-bridge-2-build" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.435708 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/839dfedb-9e62-4193-90ed-46e0adafc755-node-pullsecrets\") pod \"sg-bridge-2-build\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " pod="service-telemetry/sg-bridge-2-build" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.435745 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/839dfedb-9e62-4193-90ed-46e0adafc755-build-blob-cache\") pod \"sg-bridge-2-build\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " pod="service-telemetry/sg-bridge-2-build" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.435830 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/839dfedb-9e62-4193-90ed-46e0adafc755-container-storage-root\") pod \"sg-bridge-2-build\" (UID: 
\"839dfedb-9e62-4193-90ed-46e0adafc755\") " pod="service-telemetry/sg-bridge-2-build" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.435892 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/839dfedb-9e62-4193-90ed-46e0adafc755-container-storage-run\") pod \"sg-bridge-2-build\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " pod="service-telemetry/sg-bridge-2-build" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.435925 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/839dfedb-9e62-4193-90ed-46e0adafc755-build-system-configs\") pod \"sg-bridge-2-build\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " pod="service-telemetry/sg-bridge-2-build" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.435947 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/839dfedb-9e62-4193-90ed-46e0adafc755-buildcachedir\") pod \"sg-bridge-2-build\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " pod="service-telemetry/sg-bridge-2-build" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.435974 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/839dfedb-9e62-4193-90ed-46e0adafc755-build-proxy-ca-bundles\") pod \"sg-bridge-2-build\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " pod="service-telemetry/sg-bridge-2-build" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.436007 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/839dfedb-9e62-4193-90ed-46e0adafc755-buildworkdir\") pod \"sg-bridge-2-build\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " pod="service-telemetry/sg-bridge-2-build" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.442844 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["service-telemetry/sg-bridge-2-build"] Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.537870 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/839dfedb-9e62-4193-90ed-46e0adafc755-node-pullsecrets\") pod \"sg-bridge-2-build\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " pod="service-telemetry/sg-bridge-2-build" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.538006 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/839dfedb-9e62-4193-90ed-46e0adafc755-build-blob-cache\") pod \"sg-bridge-2-build\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " pod="service-telemetry/sg-bridge-2-build" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.538040 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/839dfedb-9e62-4193-90ed-46e0adafc755-container-storage-root\") pod \"sg-bridge-2-build\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " pod="service-telemetry/sg-bridge-2-build" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.538065 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/839dfedb-9e62-4193-90ed-46e0adafc755-container-storage-run\") pod \"sg-bridge-2-build\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " pod="service-telemetry/sg-bridge-2-build" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.538082 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/839dfedb-9e62-4193-90ed-46e0adafc755-node-pullsecrets\") pod \"sg-bridge-2-build\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " pod="service-telemetry/sg-bridge-2-build" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.538126 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/839dfedb-9e62-4193-90ed-46e0adafc755-build-system-configs\") pod \"sg-bridge-2-build\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " pod="service-telemetry/sg-bridge-2-build" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.538521 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/839dfedb-9e62-4193-90ed-46e0adafc755-container-storage-run\") pod \"sg-bridge-2-build\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " pod="service-telemetry/sg-bridge-2-build" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.538732 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/839dfedb-9e62-4193-90ed-46e0adafc755-build-blob-cache\") pod \"sg-bridge-2-build\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " pod="service-telemetry/sg-bridge-2-build" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.538774 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/839dfedb-9e62-4193-90ed-46e0adafc755-container-storage-root\") pod \"sg-bridge-2-build\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " pod="service-telemetry/sg-bridge-2-build" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.538829 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/839dfedb-9e62-4193-90ed-46e0adafc755-buildcachedir\") pod \"sg-bridge-2-build\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " pod="service-telemetry/sg-bridge-2-build" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.538875 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/839dfedb-9e62-4193-90ed-46e0adafc755-build-system-configs\") pod \"sg-bridge-2-build\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " pod="service-telemetry/sg-bridge-2-build" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.538908 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/839dfedb-9e62-4193-90ed-46e0adafc755-buildcachedir\") pod \"sg-bridge-2-build\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " pod="service-telemetry/sg-bridge-2-build" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.538939 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/839dfedb-9e62-4193-90ed-46e0adafc755-build-proxy-ca-bundles\") pod \"sg-bridge-2-build\" 
(UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " pod="service-telemetry/sg-bridge-2-build" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.539013 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/839dfedb-9e62-4193-90ed-46e0adafc755-buildworkdir\") pod \"sg-bridge-2-build\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " pod="service-telemetry/sg-bridge-2-build" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.539173 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xgrxb\" (UniqueName: \"kubernetes.io/projected/839dfedb-9e62-4193-90ed-46e0adafc755-kube-api-access-xgrxb\") pod \"sg-bridge-2-build\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " pod="service-telemetry/sg-bridge-2-build" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.539203 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/839dfedb-9e62-4193-90ed-46e0adafc755-builder-dockercfg-5d5px-push\") pod \"sg-bridge-2-build\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " pod="service-telemetry/sg-bridge-2-build" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.539223 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/839dfedb-9e62-4193-90ed-46e0adafc755-builder-dockercfg-5d5px-pull\") pod \"sg-bridge-2-build\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " pod="service-telemetry/sg-bridge-2-build" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.539244 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/839dfedb-9e62-4193-90ed-46e0adafc755-build-ca-bundles\") pod \"sg-bridge-2-build\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " pod="service-telemetry/sg-bridge-2-build" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.539296 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/839dfedb-9e62-4193-90ed-46e0adafc755-buildworkdir\") pod \"sg-bridge-2-build\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " pod="service-telemetry/sg-bridge-2-build" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.539633 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/839dfedb-9e62-4193-90ed-46e0adafc755-build-proxy-ca-bundles\") pod \"sg-bridge-2-build\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " pod="service-telemetry/sg-bridge-2-build" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.540298 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/839dfedb-9e62-4193-90ed-46e0adafc755-build-ca-bundles\") pod \"sg-bridge-2-build\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " pod="service-telemetry/sg-bridge-2-build" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.543306 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/839dfedb-9e62-4193-90ed-46e0adafc755-builder-dockercfg-5d5px-pull\") pod \"sg-bridge-2-build\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " pod="service-telemetry/sg-bridge-2-build" Nov 26 
00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.554533 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/839dfedb-9e62-4193-90ed-46e0adafc755-builder-dockercfg-5d5px-push\") pod \"sg-bridge-2-build\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " pod="service-telemetry/sg-bridge-2-build" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.556111 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xgrxb\" (UniqueName: \"kubernetes.io/projected/839dfedb-9e62-4193-90ed-46e0adafc755-kube-api-access-xgrxb\") pod \"sg-bridge-2-build\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " pod="service-telemetry/sg-bridge-2-build" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.736336 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="service-telemetry/sg-bridge-2-build" Nov 26 00:28:54 crc kubenswrapper[4875]: I1126 00:28:54.915083 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["service-telemetry/sg-bridge-2-build"] Nov 26 00:28:55 crc kubenswrapper[4875]: I1126 00:28:55.822932 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/sg-bridge-2-build" event={"ID":"839dfedb-9e62-4193-90ed-46e0adafc755","Type":"ContainerStarted","Data":"e25f184ac64c361732b86b0d88232bb59228d1c04c70eba6cfd1cc0207baad2f"} Nov 26 00:28:55 crc kubenswrapper[4875]: I1126 00:28:55.822977 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/sg-bridge-2-build" event={"ID":"839dfedb-9e62-4193-90ed-46e0adafc755","Type":"ContainerStarted","Data":"babc74c97d2f0af68763546ef08537988bd026307b1faa4958b59027821201ac"} Nov 26 00:28:56 crc kubenswrapper[4875]: I1126 00:28:56.829499 4875 generic.go:334] "Generic (PLEG): container finished" podID="839dfedb-9e62-4193-90ed-46e0adafc755" containerID="e25f184ac64c361732b86b0d88232bb59228d1c04c70eba6cfd1cc0207baad2f" exitCode=0 Nov 26 00:28:56 crc kubenswrapper[4875]: I1126 00:28:56.829578 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/sg-bridge-2-build" event={"ID":"839dfedb-9e62-4193-90ed-46e0adafc755","Type":"ContainerDied","Data":"e25f184ac64c361732b86b0d88232bb59228d1c04c70eba6cfd1cc0207baad2f"} Nov 26 00:28:57 crc kubenswrapper[4875]: I1126 00:28:57.838355 4875 generic.go:334] "Generic (PLEG): container finished" podID="839dfedb-9e62-4193-90ed-46e0adafc755" containerID="b1dec3b318336d55324a057bb3952f13640f57f16ce78a78745a07dc73a5bf3a" exitCode=0 Nov 26 00:28:57 crc kubenswrapper[4875]: I1126 00:28:57.838398 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/sg-bridge-2-build" event={"ID":"839dfedb-9e62-4193-90ed-46e0adafc755","Type":"ContainerDied","Data":"b1dec3b318336d55324a057bb3952f13640f57f16ce78a78745a07dc73a5bf3a"} Nov 26 00:28:57 crc kubenswrapper[4875]: I1126 00:28:57.884470 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/service-telemetry_sg-bridge-2-build_839dfedb-9e62-4193-90ed-46e0adafc755/manage-dockerfile/0.log" Nov 26 00:28:58 crc kubenswrapper[4875]: I1126 00:28:58.845855 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/sg-bridge-2-build" event={"ID":"839dfedb-9e62-4193-90ed-46e0adafc755","Type":"ContainerStarted","Data":"a1922460277ff18c44f45b61bc65baf30100495da07f8a7e22ed966dbf8758a2"} Nov 26 00:28:58 crc kubenswrapper[4875]: I1126 00:28:58.874876 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" 
pod="service-telemetry/sg-bridge-2-build" podStartSLOduration=4.874855463 podStartE2EDuration="4.874855463s" podCreationTimestamp="2025-11-26 00:28:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:28:58.867693958 +0000 UTC m=+1312.057669663" watchObservedRunningTime="2025-11-26 00:28:58.874855463 +0000 UTC m=+1312.064831158" Nov 26 00:29:07 crc kubenswrapper[4875]: I1126 00:29:07.511621 4875 patch_prober.go:28] interesting pod/machine-config-daemon-9mztb container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 00:29:07 crc kubenswrapper[4875]: I1126 00:29:07.512179 4875 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 00:29:07 crc kubenswrapper[4875]: I1126 00:29:07.512221 4875 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" Nov 26 00:29:07 crc kubenswrapper[4875]: I1126 00:29:07.512777 4875 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"9d7963837af3c665aadff4c3b7d21b20d77252b8e2a08db4f64c8184c28bc3b4"} pod="openshift-machine-config-operator/machine-config-daemon-9mztb" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 00:29:07 crc kubenswrapper[4875]: I1126 00:29:07.512843 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" containerName="machine-config-daemon" containerID="cri-o://9d7963837af3c665aadff4c3b7d21b20d77252b8e2a08db4f64c8184c28bc3b4" gracePeriod=600 Nov 26 00:29:07 crc kubenswrapper[4875]: I1126 00:29:07.896444 4875 generic.go:334] "Generic (PLEG): container finished" podID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" containerID="9d7963837af3c665aadff4c3b7d21b20d77252b8e2a08db4f64c8184c28bc3b4" exitCode=0 Nov 26 00:29:07 crc kubenswrapper[4875]: I1126 00:29:07.896697 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" event={"ID":"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e","Type":"ContainerDied","Data":"9d7963837af3c665aadff4c3b7d21b20d77252b8e2a08db4f64c8184c28bc3b4"} Nov 26 00:29:07 crc kubenswrapper[4875]: I1126 00:29:07.896722 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" event={"ID":"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e","Type":"ContainerStarted","Data":"5d035f080764919b1a1cc8b64c0f43d53b6b36ff26c916f5425dc21761337921"} Nov 26 00:29:07 crc kubenswrapper[4875]: I1126 00:29:07.896738 4875 scope.go:117] "RemoveContainer" containerID="924f375e812a0719e7bc1f7b009dabf2f368d974e2fd9a554bfc2a38ef6023a5" Nov 26 00:29:13 crc kubenswrapper[4875]: E1126 00:29:13.626597 4875 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: 
[\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d4fb5a7_3ade_467a_9640_d744b1ef0c8e.slice/crio-conmon-9d7963837af3c665aadff4c3b7d21b20d77252b8e2a08db4f64c8184c28bc3b4.scope\": RecentStats: unable to find data in memory cache]" Nov 26 00:29:23 crc kubenswrapper[4875]: E1126 00:29:23.747261 4875 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d4fb5a7_3ade_467a_9640_d744b1ef0c8e.slice/crio-conmon-9d7963837af3c665aadff4c3b7d21b20d77252b8e2a08db4f64c8184c28bc3b4.scope\": RecentStats: unable to find data in memory cache]" Nov 26 00:29:33 crc kubenswrapper[4875]: E1126 00:29:33.877745 4875 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d4fb5a7_3ade_467a_9640_d744b1ef0c8e.slice/crio-conmon-9d7963837af3c665aadff4c3b7d21b20d77252b8e2a08db4f64c8184c28bc3b4.scope\": RecentStats: unable to find data in memory cache]" Nov 26 00:29:44 crc kubenswrapper[4875]: E1126 00:29:44.001220 4875 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d4fb5a7_3ade_467a_9640_d744b1ef0c8e.slice/crio-conmon-9d7963837af3c665aadff4c3b7d21b20d77252b8e2a08db4f64c8184c28bc3b4.scope\": RecentStats: unable to find data in memory cache]" Nov 26 00:29:48 crc kubenswrapper[4875]: I1126 00:29:48.126706 4875 generic.go:334] "Generic (PLEG): container finished" podID="839dfedb-9e62-4193-90ed-46e0adafc755" containerID="a1922460277ff18c44f45b61bc65baf30100495da07f8a7e22ed966dbf8758a2" exitCode=0 Nov 26 00:29:48 crc kubenswrapper[4875]: I1126 00:29:48.126793 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/sg-bridge-2-build" event={"ID":"839dfedb-9e62-4193-90ed-46e0adafc755","Type":"ContainerDied","Data":"a1922460277ff18c44f45b61bc65baf30100495da07f8a7e22ed966dbf8758a2"} Nov 26 00:29:49 crc kubenswrapper[4875]: I1126 00:29:49.367785 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="service-telemetry/sg-bridge-2-build" Nov 26 00:29:49 crc kubenswrapper[4875]: I1126 00:29:49.530782 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/839dfedb-9e62-4193-90ed-46e0adafc755-buildcachedir\") pod \"839dfedb-9e62-4193-90ed-46e0adafc755\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " Nov 26 00:29:49 crc kubenswrapper[4875]: I1126 00:29:49.531089 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/839dfedb-9e62-4193-90ed-46e0adafc755-build-system-configs\") pod \"839dfedb-9e62-4193-90ed-46e0adafc755\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " Nov 26 00:29:49 crc kubenswrapper[4875]: I1126 00:29:49.531145 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/839dfedb-9e62-4193-90ed-46e0adafc755-build-ca-bundles\") pod \"839dfedb-9e62-4193-90ed-46e0adafc755\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " Nov 26 00:29:49 crc kubenswrapper[4875]: I1126 00:29:49.531185 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/839dfedb-9e62-4193-90ed-46e0adafc755-node-pullsecrets\") pod \"839dfedb-9e62-4193-90ed-46e0adafc755\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " Nov 26 00:29:49 crc kubenswrapper[4875]: I1126 00:29:49.531233 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/839dfedb-9e62-4193-90ed-46e0adafc755-build-proxy-ca-bundles\") pod \"839dfedb-9e62-4193-90ed-46e0adafc755\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " Nov 26 00:29:49 crc kubenswrapper[4875]: I1126 00:29:49.530891 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/839dfedb-9e62-4193-90ed-46e0adafc755-buildcachedir" (OuterVolumeSpecName: "buildcachedir") pod "839dfedb-9e62-4193-90ed-46e0adafc755" (UID: "839dfedb-9e62-4193-90ed-46e0adafc755"). InnerVolumeSpecName "buildcachedir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:29:49 crc kubenswrapper[4875]: I1126 00:29:49.531244 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/839dfedb-9e62-4193-90ed-46e0adafc755-node-pullsecrets" (OuterVolumeSpecName: "node-pullsecrets") pod "839dfedb-9e62-4193-90ed-46e0adafc755" (UID: "839dfedb-9e62-4193-90ed-46e0adafc755"). InnerVolumeSpecName "node-pullsecrets". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:29:49 crc kubenswrapper[4875]: I1126 00:29:49.531261 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/839dfedb-9e62-4193-90ed-46e0adafc755-container-storage-root\") pod \"839dfedb-9e62-4193-90ed-46e0adafc755\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " Nov 26 00:29:49 crc kubenswrapper[4875]: I1126 00:29:49.531291 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/839dfedb-9e62-4193-90ed-46e0adafc755-build-blob-cache\") pod \"839dfedb-9e62-4193-90ed-46e0adafc755\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " Nov 26 00:29:49 crc kubenswrapper[4875]: I1126 00:29:49.531318 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/839dfedb-9e62-4193-90ed-46e0adafc755-builder-dockercfg-5d5px-pull\") pod \"839dfedb-9e62-4193-90ed-46e0adafc755\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " Nov 26 00:29:49 crc kubenswrapper[4875]: I1126 00:29:49.531345 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/839dfedb-9e62-4193-90ed-46e0adafc755-buildworkdir\") pod \"839dfedb-9e62-4193-90ed-46e0adafc755\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " Nov 26 00:29:49 crc kubenswrapper[4875]: I1126 00:29:49.531392 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/839dfedb-9e62-4193-90ed-46e0adafc755-builder-dockercfg-5d5px-push\") pod \"839dfedb-9e62-4193-90ed-46e0adafc755\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " Nov 26 00:29:49 crc kubenswrapper[4875]: I1126 00:29:49.531461 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xgrxb\" (UniqueName: \"kubernetes.io/projected/839dfedb-9e62-4193-90ed-46e0adafc755-kube-api-access-xgrxb\") pod \"839dfedb-9e62-4193-90ed-46e0adafc755\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " Nov 26 00:29:49 crc kubenswrapper[4875]: I1126 00:29:49.531497 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/839dfedb-9e62-4193-90ed-46e0adafc755-container-storage-run\") pod \"839dfedb-9e62-4193-90ed-46e0adafc755\" (UID: \"839dfedb-9e62-4193-90ed-46e0adafc755\") " Nov 26 00:29:49 crc kubenswrapper[4875]: I1126 00:29:49.531759 4875 reconciler_common.go:293] "Volume detached for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/839dfedb-9e62-4193-90ed-46e0adafc755-buildcachedir\") on node \"crc\" DevicePath \"\"" Nov 26 00:29:49 crc kubenswrapper[4875]: I1126 00:29:49.531775 4875 reconciler_common.go:293] "Volume detached for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/839dfedb-9e62-4193-90ed-46e0adafc755-node-pullsecrets\") on node \"crc\" DevicePath \"\"" Nov 26 00:29:49 crc kubenswrapper[4875]: I1126 00:29:49.531845 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/839dfedb-9e62-4193-90ed-46e0adafc755-build-proxy-ca-bundles" (OuterVolumeSpecName: "build-proxy-ca-bundles") pod "839dfedb-9e62-4193-90ed-46e0adafc755" (UID: "839dfedb-9e62-4193-90ed-46e0adafc755"). 
InnerVolumeSpecName "build-proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:29:49 crc kubenswrapper[4875]: I1126 00:29:49.531932 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/839dfedb-9e62-4193-90ed-46e0adafc755-build-ca-bundles" (OuterVolumeSpecName: "build-ca-bundles") pod "839dfedb-9e62-4193-90ed-46e0adafc755" (UID: "839dfedb-9e62-4193-90ed-46e0adafc755"). InnerVolumeSpecName "build-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:29:49 crc kubenswrapper[4875]: I1126 00:29:49.532124 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/839dfedb-9e62-4193-90ed-46e0adafc755-build-system-configs" (OuterVolumeSpecName: "build-system-configs") pod "839dfedb-9e62-4193-90ed-46e0adafc755" (UID: "839dfedb-9e62-4193-90ed-46e0adafc755"). InnerVolumeSpecName "build-system-configs". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:29:49 crc kubenswrapper[4875]: I1126 00:29:49.532932 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/839dfedb-9e62-4193-90ed-46e0adafc755-container-storage-run" (OuterVolumeSpecName: "container-storage-run") pod "839dfedb-9e62-4193-90ed-46e0adafc755" (UID: "839dfedb-9e62-4193-90ed-46e0adafc755"). InnerVolumeSpecName "container-storage-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:29:49 crc kubenswrapper[4875]: I1126 00:29:49.533103 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/839dfedb-9e62-4193-90ed-46e0adafc755-buildworkdir" (OuterVolumeSpecName: "buildworkdir") pod "839dfedb-9e62-4193-90ed-46e0adafc755" (UID: "839dfedb-9e62-4193-90ed-46e0adafc755"). InnerVolumeSpecName "buildworkdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:29:49 crc kubenswrapper[4875]: I1126 00:29:49.538597 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/839dfedb-9e62-4193-90ed-46e0adafc755-builder-dockercfg-5d5px-push" (OuterVolumeSpecName: "builder-dockercfg-5d5px-push") pod "839dfedb-9e62-4193-90ed-46e0adafc755" (UID: "839dfedb-9e62-4193-90ed-46e0adafc755"). InnerVolumeSpecName "builder-dockercfg-5d5px-push". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:29:49 crc kubenswrapper[4875]: I1126 00:29:49.538624 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/839dfedb-9e62-4193-90ed-46e0adafc755-kube-api-access-xgrxb" (OuterVolumeSpecName: "kube-api-access-xgrxb") pod "839dfedb-9e62-4193-90ed-46e0adafc755" (UID: "839dfedb-9e62-4193-90ed-46e0adafc755"). InnerVolumeSpecName "kube-api-access-xgrxb". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:29:49 crc kubenswrapper[4875]: I1126 00:29:49.538692 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/839dfedb-9e62-4193-90ed-46e0adafc755-builder-dockercfg-5d5px-pull" (OuterVolumeSpecName: "builder-dockercfg-5d5px-pull") pod "839dfedb-9e62-4193-90ed-46e0adafc755" (UID: "839dfedb-9e62-4193-90ed-46e0adafc755"). InnerVolumeSpecName "builder-dockercfg-5d5px-pull". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:29:49 crc kubenswrapper[4875]: I1126 00:29:49.633024 4875 reconciler_common.go:293] "Volume detached for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/839dfedb-9e62-4193-90ed-46e0adafc755-build-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 26 00:29:49 crc kubenswrapper[4875]: I1126 00:29:49.633068 4875 reconciler_common.go:293] "Volume detached for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/839dfedb-9e62-4193-90ed-46e0adafc755-builder-dockercfg-5d5px-pull\") on node \"crc\" DevicePath \"\"" Nov 26 00:29:49 crc kubenswrapper[4875]: I1126 00:29:49.633080 4875 reconciler_common.go:293] "Volume detached for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/839dfedb-9e62-4193-90ed-46e0adafc755-buildworkdir\") on node \"crc\" DevicePath \"\"" Nov 26 00:29:49 crc kubenswrapper[4875]: I1126 00:29:49.633090 4875 reconciler_common.go:293] "Volume detached for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/839dfedb-9e62-4193-90ed-46e0adafc755-builder-dockercfg-5d5px-push\") on node \"crc\" DevicePath \"\"" Nov 26 00:29:49 crc kubenswrapper[4875]: I1126 00:29:49.633099 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xgrxb\" (UniqueName: \"kubernetes.io/projected/839dfedb-9e62-4193-90ed-46e0adafc755-kube-api-access-xgrxb\") on node \"crc\" DevicePath \"\"" Nov 26 00:29:49 crc kubenswrapper[4875]: I1126 00:29:49.633107 4875 reconciler_common.go:293] "Volume detached for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/839dfedb-9e62-4193-90ed-46e0adafc755-container-storage-run\") on node \"crc\" DevicePath \"\"" Nov 26 00:29:49 crc kubenswrapper[4875]: I1126 00:29:49.633118 4875 reconciler_common.go:293] "Volume detached for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/839dfedb-9e62-4193-90ed-46e0adafc755-build-system-configs\") on node \"crc\" DevicePath \"\"" Nov 26 00:29:49 crc kubenswrapper[4875]: I1126 00:29:49.633129 4875 reconciler_common.go:293] "Volume detached for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/839dfedb-9e62-4193-90ed-46e0adafc755-build-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 26 00:29:49 crc kubenswrapper[4875]: I1126 00:29:49.643044 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/839dfedb-9e62-4193-90ed-46e0adafc755-build-blob-cache" (OuterVolumeSpecName: "build-blob-cache") pod "839dfedb-9e62-4193-90ed-46e0adafc755" (UID: "839dfedb-9e62-4193-90ed-46e0adafc755"). InnerVolumeSpecName "build-blob-cache". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:29:49 crc kubenswrapper[4875]: I1126 00:29:49.734851 4875 reconciler_common.go:293] "Volume detached for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/839dfedb-9e62-4193-90ed-46e0adafc755-build-blob-cache\") on node \"crc\" DevicePath \"\"" Nov 26 00:29:50 crc kubenswrapper[4875]: I1126 00:29:50.143032 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/sg-bridge-2-build" event={"ID":"839dfedb-9e62-4193-90ed-46e0adafc755","Type":"ContainerDied","Data":"babc74c97d2f0af68763546ef08537988bd026307b1faa4958b59027821201ac"} Nov 26 00:29:50 crc kubenswrapper[4875]: I1126 00:29:50.143076 4875 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="babc74c97d2f0af68763546ef08537988bd026307b1faa4958b59027821201ac" Nov 26 00:29:50 crc kubenswrapper[4875]: I1126 00:29:50.143226 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="service-telemetry/sg-bridge-2-build" Nov 26 00:29:50 crc kubenswrapper[4875]: I1126 00:29:50.215719 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/839dfedb-9e62-4193-90ed-46e0adafc755-container-storage-root" (OuterVolumeSpecName: "container-storage-root") pod "839dfedb-9e62-4193-90ed-46e0adafc755" (UID: "839dfedb-9e62-4193-90ed-46e0adafc755"). InnerVolumeSpecName "container-storage-root". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:29:50 crc kubenswrapper[4875]: I1126 00:29:50.243325 4875 reconciler_common.go:293] "Volume detached for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/839dfedb-9e62-4193-90ed-46e0adafc755-container-storage-root\") on node \"crc\" DevicePath \"\"" Nov 26 00:29:54 crc kubenswrapper[4875]: E1126 00:29:54.136025 4875 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d4fb5a7_3ade_467a_9640_d744b1ef0c8e.slice/crio-conmon-9d7963837af3c665aadff4c3b7d21b20d77252b8e2a08db4f64c8184c28bc3b4.scope\": RecentStats: unable to find data in memory cache]" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.258797 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["service-telemetry/prometheus-webhook-snmp-1-build"] Nov 26 00:29:54 crc kubenswrapper[4875]: E1126 00:29:54.259212 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="839dfedb-9e62-4193-90ed-46e0adafc755" containerName="docker-build" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.259225 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="839dfedb-9e62-4193-90ed-46e0adafc755" containerName="docker-build" Nov 26 00:29:54 crc kubenswrapper[4875]: E1126 00:29:54.259237 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="839dfedb-9e62-4193-90ed-46e0adafc755" containerName="git-clone" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.259243 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="839dfedb-9e62-4193-90ed-46e0adafc755" containerName="git-clone" Nov 26 00:29:54 crc kubenswrapper[4875]: E1126 00:29:54.259258 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="839dfedb-9e62-4193-90ed-46e0adafc755" containerName="manage-dockerfile" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.259264 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="839dfedb-9e62-4193-90ed-46e0adafc755" 
containerName="manage-dockerfile" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.259379 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="839dfedb-9e62-4193-90ed-46e0adafc755" containerName="docker-build" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.260141 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="service-telemetry/prometheus-webhook-snmp-1-build" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.262480 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"service-telemetry"/"prometheus-webhook-snmp-1-ca" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.262940 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"service-telemetry"/"prometheus-webhook-snmp-1-global-ca" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.274514 4875 reflector.go:368] Caches populated for *v1.Secret from object-"service-telemetry"/"builder-dockercfg-5d5px" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.274872 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"service-telemetry"/"prometheus-webhook-snmp-1-sys-config" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.281695 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["service-telemetry/prometheus-webhook-snmp-1-build"] Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.394824 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/36f7da7c-5954-4c03-8cf5-01df9cfa3577-node-pullsecrets\") pod \"prometheus-webhook-snmp-1-build\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " pod="service-telemetry/prometheus-webhook-snmp-1-build" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.395181 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/36f7da7c-5954-4c03-8cf5-01df9cfa3577-build-blob-cache\") pod \"prometheus-webhook-snmp-1-build\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " pod="service-telemetry/prometheus-webhook-snmp-1-build" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.395277 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-msvjf\" (UniqueName: \"kubernetes.io/projected/36f7da7c-5954-4c03-8cf5-01df9cfa3577-kube-api-access-msvjf\") pod \"prometheus-webhook-snmp-1-build\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " pod="service-telemetry/prometheus-webhook-snmp-1-build" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.395363 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/36f7da7c-5954-4c03-8cf5-01df9cfa3577-build-ca-bundles\") pod \"prometheus-webhook-snmp-1-build\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " pod="service-telemetry/prometheus-webhook-snmp-1-build" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.395484 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/36f7da7c-5954-4c03-8cf5-01df9cfa3577-build-proxy-ca-bundles\") pod \"prometheus-webhook-snmp-1-build\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " pod="service-telemetry/prometheus-webhook-snmp-1-build" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 
00:29:54.395575 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/36f7da7c-5954-4c03-8cf5-01df9cfa3577-buildworkdir\") pod \"prometheus-webhook-snmp-1-build\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " pod="service-telemetry/prometheus-webhook-snmp-1-build" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.395680 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/36f7da7c-5954-4c03-8cf5-01df9cfa3577-container-storage-root\") pod \"prometheus-webhook-snmp-1-build\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " pod="service-telemetry/prometheus-webhook-snmp-1-build" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.395788 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/36f7da7c-5954-4c03-8cf5-01df9cfa3577-buildcachedir\") pod \"prometheus-webhook-snmp-1-build\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " pod="service-telemetry/prometheus-webhook-snmp-1-build" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.395881 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/36f7da7c-5954-4c03-8cf5-01df9cfa3577-builder-dockercfg-5d5px-push\") pod \"prometheus-webhook-snmp-1-build\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " pod="service-telemetry/prometheus-webhook-snmp-1-build" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.395959 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/36f7da7c-5954-4c03-8cf5-01df9cfa3577-builder-dockercfg-5d5px-pull\") pod \"prometheus-webhook-snmp-1-build\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " pod="service-telemetry/prometheus-webhook-snmp-1-build" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.396031 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/36f7da7c-5954-4c03-8cf5-01df9cfa3577-build-system-configs\") pod \"prometheus-webhook-snmp-1-build\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " pod="service-telemetry/prometheus-webhook-snmp-1-build" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.396213 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/36f7da7c-5954-4c03-8cf5-01df9cfa3577-container-storage-run\") pod \"prometheus-webhook-snmp-1-build\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " pod="service-telemetry/prometheus-webhook-snmp-1-build" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.497094 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/36f7da7c-5954-4c03-8cf5-01df9cfa3577-buildcachedir\") pod \"prometheus-webhook-snmp-1-build\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " pod="service-telemetry/prometheus-webhook-snmp-1-build" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.497151 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume 
\"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/36f7da7c-5954-4c03-8cf5-01df9cfa3577-builder-dockercfg-5d5px-push\") pod \"prometheus-webhook-snmp-1-build\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " pod="service-telemetry/prometheus-webhook-snmp-1-build" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.497178 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/36f7da7c-5954-4c03-8cf5-01df9cfa3577-builder-dockercfg-5d5px-pull\") pod \"prometheus-webhook-snmp-1-build\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " pod="service-telemetry/prometheus-webhook-snmp-1-build" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.497182 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/36f7da7c-5954-4c03-8cf5-01df9cfa3577-buildcachedir\") pod \"prometheus-webhook-snmp-1-build\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " pod="service-telemetry/prometheus-webhook-snmp-1-build" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.497196 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/36f7da7c-5954-4c03-8cf5-01df9cfa3577-build-system-configs\") pod \"prometheus-webhook-snmp-1-build\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " pod="service-telemetry/prometheus-webhook-snmp-1-build" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.497277 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/36f7da7c-5954-4c03-8cf5-01df9cfa3577-container-storage-run\") pod \"prometheus-webhook-snmp-1-build\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " pod="service-telemetry/prometheus-webhook-snmp-1-build" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.497307 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/36f7da7c-5954-4c03-8cf5-01df9cfa3577-node-pullsecrets\") pod \"prometheus-webhook-snmp-1-build\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " pod="service-telemetry/prometheus-webhook-snmp-1-build" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.497353 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/36f7da7c-5954-4c03-8cf5-01df9cfa3577-build-blob-cache\") pod \"prometheus-webhook-snmp-1-build\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " pod="service-telemetry/prometheus-webhook-snmp-1-build" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.497376 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-msvjf\" (UniqueName: \"kubernetes.io/projected/36f7da7c-5954-4c03-8cf5-01df9cfa3577-kube-api-access-msvjf\") pod \"prometheus-webhook-snmp-1-build\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " pod="service-telemetry/prometheus-webhook-snmp-1-build" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.497434 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/36f7da7c-5954-4c03-8cf5-01df9cfa3577-build-ca-bundles\") pod \"prometheus-webhook-snmp-1-build\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " 
pod="service-telemetry/prometheus-webhook-snmp-1-build" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.497488 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/36f7da7c-5954-4c03-8cf5-01df9cfa3577-build-proxy-ca-bundles\") pod \"prometheus-webhook-snmp-1-build\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " pod="service-telemetry/prometheus-webhook-snmp-1-build" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.497536 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/36f7da7c-5954-4c03-8cf5-01df9cfa3577-buildworkdir\") pod \"prometheus-webhook-snmp-1-build\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " pod="service-telemetry/prometheus-webhook-snmp-1-build" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.497564 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/36f7da7c-5954-4c03-8cf5-01df9cfa3577-container-storage-root\") pod \"prometheus-webhook-snmp-1-build\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " pod="service-telemetry/prometheus-webhook-snmp-1-build" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.497884 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/36f7da7c-5954-4c03-8cf5-01df9cfa3577-node-pullsecrets\") pod \"prometheus-webhook-snmp-1-build\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " pod="service-telemetry/prometheus-webhook-snmp-1-build" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.497892 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/36f7da7c-5954-4c03-8cf5-01df9cfa3577-build-system-configs\") pod \"prometheus-webhook-snmp-1-build\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " pod="service-telemetry/prometheus-webhook-snmp-1-build" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.497935 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/36f7da7c-5954-4c03-8cf5-01df9cfa3577-container-storage-run\") pod \"prometheus-webhook-snmp-1-build\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " pod="service-telemetry/prometheus-webhook-snmp-1-build" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.498178 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/36f7da7c-5954-4c03-8cf5-01df9cfa3577-buildworkdir\") pod \"prometheus-webhook-snmp-1-build\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " pod="service-telemetry/prometheus-webhook-snmp-1-build" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.498063 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/36f7da7c-5954-4c03-8cf5-01df9cfa3577-container-storage-root\") pod \"prometheus-webhook-snmp-1-build\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " pod="service-telemetry/prometheus-webhook-snmp-1-build" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.498011 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"build-blob-cache\" (UniqueName: 
\"kubernetes.io/empty-dir/36f7da7c-5954-4c03-8cf5-01df9cfa3577-build-blob-cache\") pod \"prometheus-webhook-snmp-1-build\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " pod="service-telemetry/prometheus-webhook-snmp-1-build" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.508397 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/36f7da7c-5954-4c03-8cf5-01df9cfa3577-build-proxy-ca-bundles\") pod \"prometheus-webhook-snmp-1-build\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " pod="service-telemetry/prometheus-webhook-snmp-1-build" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.625138 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/36f7da7c-5954-4c03-8cf5-01df9cfa3577-builder-dockercfg-5d5px-pull\") pod \"prometheus-webhook-snmp-1-build\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " pod="service-telemetry/prometheus-webhook-snmp-1-build" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.625166 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/36f7da7c-5954-4c03-8cf5-01df9cfa3577-builder-dockercfg-5d5px-push\") pod \"prometheus-webhook-snmp-1-build\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " pod="service-telemetry/prometheus-webhook-snmp-1-build" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.625262 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/36f7da7c-5954-4c03-8cf5-01df9cfa3577-build-ca-bundles\") pod \"prometheus-webhook-snmp-1-build\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " pod="service-telemetry/prometheus-webhook-snmp-1-build" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.627852 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-msvjf\" (UniqueName: \"kubernetes.io/projected/36f7da7c-5954-4c03-8cf5-01df9cfa3577-kube-api-access-msvjf\") pod \"prometheus-webhook-snmp-1-build\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " pod="service-telemetry/prometheus-webhook-snmp-1-build" Nov 26 00:29:54 crc kubenswrapper[4875]: I1126 00:29:54.884056 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="service-telemetry/prometheus-webhook-snmp-1-build" Nov 26 00:29:55 crc kubenswrapper[4875]: I1126 00:29:55.274521 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["service-telemetry/prometheus-webhook-snmp-1-build"] Nov 26 00:29:56 crc kubenswrapper[4875]: I1126 00:29:56.178613 4875 generic.go:334] "Generic (PLEG): container finished" podID="36f7da7c-5954-4c03-8cf5-01df9cfa3577" containerID="0136e3b4b34ae835d92ec71e62ac1563a6d81a1be0474d2e215c7cc82e3ea391" exitCode=0 Nov 26 00:29:56 crc kubenswrapper[4875]: I1126 00:29:56.178663 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/prometheus-webhook-snmp-1-build" event={"ID":"36f7da7c-5954-4c03-8cf5-01df9cfa3577","Type":"ContainerDied","Data":"0136e3b4b34ae835d92ec71e62ac1563a6d81a1be0474d2e215c7cc82e3ea391"} Nov 26 00:29:56 crc kubenswrapper[4875]: I1126 00:29:56.178692 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/prometheus-webhook-snmp-1-build" event={"ID":"36f7da7c-5954-4c03-8cf5-01df9cfa3577","Type":"ContainerStarted","Data":"14c73ed6852acfb21cfcb2cba0f9c71787ee665baedd76afeabeb4d4ad52188c"} Nov 26 00:29:57 crc kubenswrapper[4875]: I1126 00:29:57.188943 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/prometheus-webhook-snmp-1-build" event={"ID":"36f7da7c-5954-4c03-8cf5-01df9cfa3577","Type":"ContainerStarted","Data":"79e3607c207678e89484a64254995eafa930be8a2b80beb67e31601206d7fb26"} Nov 26 00:29:57 crc kubenswrapper[4875]: I1126 00:29:57.225169 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="service-telemetry/prometheus-webhook-snmp-1-build" podStartSLOduration=3.22512378 podStartE2EDuration="3.22512378s" podCreationTimestamp="2025-11-26 00:29:54 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:29:57.212882858 +0000 UTC m=+1370.402858593" watchObservedRunningTime="2025-11-26 00:29:57.22512378 +0000 UTC m=+1370.415099515" Nov 26 00:30:00 crc kubenswrapper[4875]: I1126 00:30:00.136009 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401950-kn9p7"] Nov 26 00:30:00 crc kubenswrapper[4875]: I1126 00:30:00.137213 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401950-kn9p7" Nov 26 00:30:00 crc kubenswrapper[4875]: I1126 00:30:00.139759 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"collect-profiles-config" Nov 26 00:30:00 crc kubenswrapper[4875]: I1126 00:30:00.140684 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"collect-profiles-dockercfg-kzf4t" Nov 26 00:30:00 crc kubenswrapper[4875]: I1126 00:30:00.148194 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401950-kn9p7"] Nov 26 00:30:00 crc kubenswrapper[4875]: I1126 00:30:00.294761 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8c38ffff-58a1-4661-9237-df3cf6241fda-config-volume\") pod \"collect-profiles-29401950-kn9p7\" (UID: \"8c38ffff-58a1-4661-9237-df3cf6241fda\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401950-kn9p7" Nov 26 00:30:00 crc kubenswrapper[4875]: I1126 00:30:00.294809 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8c38ffff-58a1-4661-9237-df3cf6241fda-secret-volume\") pod \"collect-profiles-29401950-kn9p7\" (UID: \"8c38ffff-58a1-4661-9237-df3cf6241fda\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401950-kn9p7" Nov 26 00:30:00 crc kubenswrapper[4875]: I1126 00:30:00.294845 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-5hwcf\" (UniqueName: \"kubernetes.io/projected/8c38ffff-58a1-4661-9237-df3cf6241fda-kube-api-access-5hwcf\") pod \"collect-profiles-29401950-kn9p7\" (UID: \"8c38ffff-58a1-4661-9237-df3cf6241fda\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401950-kn9p7" Nov 26 00:30:00 crc kubenswrapper[4875]: I1126 00:30:00.396958 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8c38ffff-58a1-4661-9237-df3cf6241fda-config-volume\") pod \"collect-profiles-29401950-kn9p7\" (UID: \"8c38ffff-58a1-4661-9237-df3cf6241fda\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401950-kn9p7" Nov 26 00:30:00 crc kubenswrapper[4875]: I1126 00:30:00.397031 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8c38ffff-58a1-4661-9237-df3cf6241fda-secret-volume\") pod \"collect-profiles-29401950-kn9p7\" (UID: \"8c38ffff-58a1-4661-9237-df3cf6241fda\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401950-kn9p7" Nov 26 00:30:00 crc kubenswrapper[4875]: I1126 00:30:00.397099 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-5hwcf\" (UniqueName: \"kubernetes.io/projected/8c38ffff-58a1-4661-9237-df3cf6241fda-kube-api-access-5hwcf\") pod \"collect-profiles-29401950-kn9p7\" (UID: \"8c38ffff-58a1-4661-9237-df3cf6241fda\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401950-kn9p7" Nov 26 00:30:00 crc kubenswrapper[4875]: I1126 00:30:00.397775 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8c38ffff-58a1-4661-9237-df3cf6241fda-config-volume\") pod 
\"collect-profiles-29401950-kn9p7\" (UID: \"8c38ffff-58a1-4661-9237-df3cf6241fda\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401950-kn9p7" Nov 26 00:30:00 crc kubenswrapper[4875]: I1126 00:30:00.402610 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8c38ffff-58a1-4661-9237-df3cf6241fda-secret-volume\") pod \"collect-profiles-29401950-kn9p7\" (UID: \"8c38ffff-58a1-4661-9237-df3cf6241fda\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401950-kn9p7" Nov 26 00:30:00 crc kubenswrapper[4875]: I1126 00:30:00.416217 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-5hwcf\" (UniqueName: \"kubernetes.io/projected/8c38ffff-58a1-4661-9237-df3cf6241fda-kube-api-access-5hwcf\") pod \"collect-profiles-29401950-kn9p7\" (UID: \"8c38ffff-58a1-4661-9237-df3cf6241fda\") " pod="openshift-operator-lifecycle-manager/collect-profiles-29401950-kn9p7" Nov 26 00:30:00 crc kubenswrapper[4875]: I1126 00:30:00.457039 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401950-kn9p7" Nov 26 00:30:00 crc kubenswrapper[4875]: I1126 00:30:00.657524 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-operator-lifecycle-manager/collect-profiles-29401950-kn9p7"] Nov 26 00:30:00 crc kubenswrapper[4875]: W1126 00:30:00.664125 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod8c38ffff_58a1_4661_9237_df3cf6241fda.slice/crio-93842dc24d90ca518bbd2b89275f437514000296f0a006c4b74bd9314ffb9e53 WatchSource:0}: Error finding container 93842dc24d90ca518bbd2b89275f437514000296f0a006c4b74bd9314ffb9e53: Status 404 returned error can't find the container with id 93842dc24d90ca518bbd2b89275f437514000296f0a006c4b74bd9314ffb9e53 Nov 26 00:30:01 crc kubenswrapper[4875]: I1126 00:30:01.213628 4875 generic.go:334] "Generic (PLEG): container finished" podID="8c38ffff-58a1-4661-9237-df3cf6241fda" containerID="7800c36178453f28107d979c4661b17c5dcf83141f69f2f84399ed555752a5bf" exitCode=0 Nov 26 00:30:01 crc kubenswrapper[4875]: I1126 00:30:01.213833 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401950-kn9p7" event={"ID":"8c38ffff-58a1-4661-9237-df3cf6241fda","Type":"ContainerDied","Data":"7800c36178453f28107d979c4661b17c5dcf83141f69f2f84399ed555752a5bf"} Nov 26 00:30:01 crc kubenswrapper[4875]: I1126 00:30:01.213894 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401950-kn9p7" event={"ID":"8c38ffff-58a1-4661-9237-df3cf6241fda","Type":"ContainerStarted","Data":"93842dc24d90ca518bbd2b89275f437514000296f0a006c4b74bd9314ffb9e53"} Nov 26 00:30:02 crc kubenswrapper[4875]: I1126 00:30:02.486248 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401950-kn9p7" Nov 26 00:30:02 crc kubenswrapper[4875]: I1126 00:30:02.627472 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-5hwcf\" (UniqueName: \"kubernetes.io/projected/8c38ffff-58a1-4661-9237-df3cf6241fda-kube-api-access-5hwcf\") pod \"8c38ffff-58a1-4661-9237-df3cf6241fda\" (UID: \"8c38ffff-58a1-4661-9237-df3cf6241fda\") " Nov 26 00:30:02 crc kubenswrapper[4875]: I1126 00:30:02.627616 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8c38ffff-58a1-4661-9237-df3cf6241fda-secret-volume\") pod \"8c38ffff-58a1-4661-9237-df3cf6241fda\" (UID: \"8c38ffff-58a1-4661-9237-df3cf6241fda\") " Nov 26 00:30:02 crc kubenswrapper[4875]: I1126 00:30:02.627648 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8c38ffff-58a1-4661-9237-df3cf6241fda-config-volume\") pod \"8c38ffff-58a1-4661-9237-df3cf6241fda\" (UID: \"8c38ffff-58a1-4661-9237-df3cf6241fda\") " Nov 26 00:30:02 crc kubenswrapper[4875]: I1126 00:30:02.628232 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/8c38ffff-58a1-4661-9237-df3cf6241fda-config-volume" (OuterVolumeSpecName: "config-volume") pod "8c38ffff-58a1-4661-9237-df3cf6241fda" (UID: "8c38ffff-58a1-4661-9237-df3cf6241fda"). InnerVolumeSpecName "config-volume". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:30:02 crc kubenswrapper[4875]: I1126 00:30:02.629216 4875 reconciler_common.go:293] "Volume detached for volume \"config-volume\" (UniqueName: \"kubernetes.io/configmap/8c38ffff-58a1-4661-9237-df3cf6241fda-config-volume\") on node \"crc\" DevicePath \"\"" Nov 26 00:30:02 crc kubenswrapper[4875]: I1126 00:30:02.633365 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8c38ffff-58a1-4661-9237-df3cf6241fda-kube-api-access-5hwcf" (OuterVolumeSpecName: "kube-api-access-5hwcf") pod "8c38ffff-58a1-4661-9237-df3cf6241fda" (UID: "8c38ffff-58a1-4661-9237-df3cf6241fda"). InnerVolumeSpecName "kube-api-access-5hwcf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:30:02 crc kubenswrapper[4875]: I1126 00:30:02.634014 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/8c38ffff-58a1-4661-9237-df3cf6241fda-secret-volume" (OuterVolumeSpecName: "secret-volume") pod "8c38ffff-58a1-4661-9237-df3cf6241fda" (UID: "8c38ffff-58a1-4661-9237-df3cf6241fda"). InnerVolumeSpecName "secret-volume". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:30:02 crc kubenswrapper[4875]: I1126 00:30:02.730978 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-5hwcf\" (UniqueName: \"kubernetes.io/projected/8c38ffff-58a1-4661-9237-df3cf6241fda-kube-api-access-5hwcf\") on node \"crc\" DevicePath \"\"" Nov 26 00:30:02 crc kubenswrapper[4875]: I1126 00:30:02.731015 4875 reconciler_common.go:293] "Volume detached for volume \"secret-volume\" (UniqueName: \"kubernetes.io/secret/8c38ffff-58a1-4661-9237-df3cf6241fda-secret-volume\") on node \"crc\" DevicePath \"\"" Nov 26 00:30:03 crc kubenswrapper[4875]: I1126 00:30:03.225659 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-operator-lifecycle-manager/collect-profiles-29401950-kn9p7" event={"ID":"8c38ffff-58a1-4661-9237-df3cf6241fda","Type":"ContainerDied","Data":"93842dc24d90ca518bbd2b89275f437514000296f0a006c4b74bd9314ffb9e53"} Nov 26 00:30:03 crc kubenswrapper[4875]: I1126 00:30:03.225698 4875 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="93842dc24d90ca518bbd2b89275f437514000296f0a006c4b74bd9314ffb9e53" Nov 26 00:30:03 crc kubenswrapper[4875]: I1126 00:30:03.225704 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-operator-lifecycle-manager/collect-profiles-29401950-kn9p7" Nov 26 00:30:04 crc kubenswrapper[4875]: E1126 00:30:04.255979 4875 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d4fb5a7_3ade_467a_9640_d744b1ef0c8e.slice/crio-conmon-9d7963837af3c665aadff4c3b7d21b20d77252b8e2a08db4f64c8184c28bc3b4.scope\": RecentStats: unable to find data in memory cache]" Nov 26 00:30:04 crc kubenswrapper[4875]: I1126 00:30:04.441323 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["service-telemetry/prometheus-webhook-snmp-1-build"] Nov 26 00:30:04 crc kubenswrapper[4875]: I1126 00:30:04.441631 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="service-telemetry/prometheus-webhook-snmp-1-build" podUID="36f7da7c-5954-4c03-8cf5-01df9cfa3577" containerName="docker-build" containerID="cri-o://79e3607c207678e89484a64254995eafa930be8a2b80beb67e31601206d7fb26" gracePeriod=30 Nov 26 00:30:04 crc kubenswrapper[4875]: I1126 00:30:04.760478 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/service-telemetry_prometheus-webhook-snmp-1-build_36f7da7c-5954-4c03-8cf5-01df9cfa3577/docker-build/0.log" Nov 26 00:30:04 crc kubenswrapper[4875]: I1126 00:30:04.761214 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="service-telemetry/prometheus-webhook-snmp-1-build" Nov 26 00:30:04 crc kubenswrapper[4875]: I1126 00:30:04.859811 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/36f7da7c-5954-4c03-8cf5-01df9cfa3577-build-ca-bundles\") pod \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " Nov 26 00:30:04 crc kubenswrapper[4875]: I1126 00:30:04.859879 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/36f7da7c-5954-4c03-8cf5-01df9cfa3577-container-storage-run\") pod \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " Nov 26 00:30:04 crc kubenswrapper[4875]: I1126 00:30:04.859903 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-msvjf\" (UniqueName: \"kubernetes.io/projected/36f7da7c-5954-4c03-8cf5-01df9cfa3577-kube-api-access-msvjf\") pod \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " Nov 26 00:30:04 crc kubenswrapper[4875]: I1126 00:30:04.859932 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/36f7da7c-5954-4c03-8cf5-01df9cfa3577-build-proxy-ca-bundles\") pod \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " Nov 26 00:30:04 crc kubenswrapper[4875]: I1126 00:30:04.860089 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/36f7da7c-5954-4c03-8cf5-01df9cfa3577-container-storage-root\") pod \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " Nov 26 00:30:04 crc kubenswrapper[4875]: I1126 00:30:04.860154 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/36f7da7c-5954-4c03-8cf5-01df9cfa3577-build-system-configs\") pod \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " Nov 26 00:30:04 crc kubenswrapper[4875]: I1126 00:30:04.860221 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/36f7da7c-5954-4c03-8cf5-01df9cfa3577-builder-dockercfg-5d5px-push\") pod \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " Nov 26 00:30:04 crc kubenswrapper[4875]: I1126 00:30:04.861014 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/36f7da7c-5954-4c03-8cf5-01df9cfa3577-build-blob-cache\") pod \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " Nov 26 00:30:04 crc kubenswrapper[4875]: I1126 00:30:04.860952 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/36f7da7c-5954-4c03-8cf5-01df9cfa3577-build-system-configs" (OuterVolumeSpecName: "build-system-configs") pod "36f7da7c-5954-4c03-8cf5-01df9cfa3577" (UID: "36f7da7c-5954-4c03-8cf5-01df9cfa3577"). InnerVolumeSpecName "build-system-configs". 
PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:30:04 crc kubenswrapper[4875]: I1126 00:30:04.861053 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/36f7da7c-5954-4c03-8cf5-01df9cfa3577-build-ca-bundles" (OuterVolumeSpecName: "build-ca-bundles") pod "36f7da7c-5954-4c03-8cf5-01df9cfa3577" (UID: "36f7da7c-5954-4c03-8cf5-01df9cfa3577"). InnerVolumeSpecName "build-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:30:04 crc kubenswrapper[4875]: I1126 00:30:04.860980 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/36f7da7c-5954-4c03-8cf5-01df9cfa3577-build-proxy-ca-bundles" (OuterVolumeSpecName: "build-proxy-ca-bundles") pod "36f7da7c-5954-4c03-8cf5-01df9cfa3577" (UID: "36f7da7c-5954-4c03-8cf5-01df9cfa3577"). InnerVolumeSpecName "build-proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:30:04 crc kubenswrapper[4875]: I1126 00:30:04.861113 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/36f7da7c-5954-4c03-8cf5-01df9cfa3577-builder-dockercfg-5d5px-pull\") pod \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " Nov 26 00:30:04 crc kubenswrapper[4875]: I1126 00:30:04.861161 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/36f7da7c-5954-4c03-8cf5-01df9cfa3577-buildcachedir\") pod \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " Nov 26 00:30:04 crc kubenswrapper[4875]: I1126 00:30:04.861184 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/36f7da7c-5954-4c03-8cf5-01df9cfa3577-node-pullsecrets\") pod \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " Nov 26 00:30:04 crc kubenswrapper[4875]: I1126 00:30:04.861215 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/36f7da7c-5954-4c03-8cf5-01df9cfa3577-buildcachedir" (OuterVolumeSpecName: "buildcachedir") pod "36f7da7c-5954-4c03-8cf5-01df9cfa3577" (UID: "36f7da7c-5954-4c03-8cf5-01df9cfa3577"). InnerVolumeSpecName "buildcachedir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:30:04 crc kubenswrapper[4875]: I1126 00:30:04.861459 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/36f7da7c-5954-4c03-8cf5-01df9cfa3577-buildworkdir\") pod \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " Nov 26 00:30:04 crc kubenswrapper[4875]: I1126 00:30:04.861854 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/36f7da7c-5954-4c03-8cf5-01df9cfa3577-buildworkdir" (OuterVolumeSpecName: "buildworkdir") pod "36f7da7c-5954-4c03-8cf5-01df9cfa3577" (UID: "36f7da7c-5954-4c03-8cf5-01df9cfa3577"). InnerVolumeSpecName "buildworkdir". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:30:04 crc kubenswrapper[4875]: I1126 00:30:04.861885 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/36f7da7c-5954-4c03-8cf5-01df9cfa3577-container-storage-run" (OuterVolumeSpecName: "container-storage-run") pod "36f7da7c-5954-4c03-8cf5-01df9cfa3577" (UID: "36f7da7c-5954-4c03-8cf5-01df9cfa3577"). InnerVolumeSpecName "container-storage-run". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:30:04 crc kubenswrapper[4875]: I1126 00:30:04.861898 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/36f7da7c-5954-4c03-8cf5-01df9cfa3577-node-pullsecrets" (OuterVolumeSpecName: "node-pullsecrets") pod "36f7da7c-5954-4c03-8cf5-01df9cfa3577" (UID: "36f7da7c-5954-4c03-8cf5-01df9cfa3577"). InnerVolumeSpecName "node-pullsecrets". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:30:04 crc kubenswrapper[4875]: I1126 00:30:04.862108 4875 reconciler_common.go:293] "Volume detached for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/36f7da7c-5954-4c03-8cf5-01df9cfa3577-build-system-configs\") on node \"crc\" DevicePath \"\"" Nov 26 00:30:04 crc kubenswrapper[4875]: I1126 00:30:04.862135 4875 reconciler_common.go:293] "Volume detached for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/36f7da7c-5954-4c03-8cf5-01df9cfa3577-buildcachedir\") on node \"crc\" DevicePath \"\"" Nov 26 00:30:04 crc kubenswrapper[4875]: I1126 00:30:04.862144 4875 reconciler_common.go:293] "Volume detached for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/36f7da7c-5954-4c03-8cf5-01df9cfa3577-node-pullsecrets\") on node \"crc\" DevicePath \"\"" Nov 26 00:30:04 crc kubenswrapper[4875]: I1126 00:30:04.862155 4875 reconciler_common.go:293] "Volume detached for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/36f7da7c-5954-4c03-8cf5-01df9cfa3577-buildworkdir\") on node \"crc\" DevicePath \"\"" Nov 26 00:30:04 crc kubenswrapper[4875]: I1126 00:30:04.862164 4875 reconciler_common.go:293] "Volume detached for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/36f7da7c-5954-4c03-8cf5-01df9cfa3577-build-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 26 00:30:04 crc kubenswrapper[4875]: I1126 00:30:04.862174 4875 reconciler_common.go:293] "Volume detached for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/36f7da7c-5954-4c03-8cf5-01df9cfa3577-container-storage-run\") on node \"crc\" DevicePath \"\"" Nov 26 00:30:04 crc kubenswrapper[4875]: I1126 00:30:04.862182 4875 reconciler_common.go:293] "Volume detached for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/36f7da7c-5954-4c03-8cf5-01df9cfa3577-build-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 26 00:30:04 crc kubenswrapper[4875]: I1126 00:30:04.865207 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/36f7da7c-5954-4c03-8cf5-01df9cfa3577-builder-dockercfg-5d5px-pull" (OuterVolumeSpecName: "builder-dockercfg-5d5px-pull") pod "36f7da7c-5954-4c03-8cf5-01df9cfa3577" (UID: "36f7da7c-5954-4c03-8cf5-01df9cfa3577"). InnerVolumeSpecName "builder-dockercfg-5d5px-pull". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:30:04 crc kubenswrapper[4875]: I1126 00:30:04.865238 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/36f7da7c-5954-4c03-8cf5-01df9cfa3577-kube-api-access-msvjf" (OuterVolumeSpecName: "kube-api-access-msvjf") pod "36f7da7c-5954-4c03-8cf5-01df9cfa3577" (UID: "36f7da7c-5954-4c03-8cf5-01df9cfa3577"). InnerVolumeSpecName "kube-api-access-msvjf". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:30:04 crc kubenswrapper[4875]: I1126 00:30:04.869590 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/36f7da7c-5954-4c03-8cf5-01df9cfa3577-builder-dockercfg-5d5px-push" (OuterVolumeSpecName: "builder-dockercfg-5d5px-push") pod "36f7da7c-5954-4c03-8cf5-01df9cfa3577" (UID: "36f7da7c-5954-4c03-8cf5-01df9cfa3577"). InnerVolumeSpecName "builder-dockercfg-5d5px-push". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:30:04 crc kubenswrapper[4875]: I1126 00:30:04.915504 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/36f7da7c-5954-4c03-8cf5-01df9cfa3577-build-blob-cache" (OuterVolumeSpecName: "build-blob-cache") pod "36f7da7c-5954-4c03-8cf5-01df9cfa3577" (UID: "36f7da7c-5954-4c03-8cf5-01df9cfa3577"). InnerVolumeSpecName "build-blob-cache". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:30:04 crc kubenswrapper[4875]: I1126 00:30:04.963115 4875 reconciler_common.go:293] "Volume detached for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/36f7da7c-5954-4c03-8cf5-01df9cfa3577-builder-dockercfg-5d5px-push\") on node \"crc\" DevicePath \"\"" Nov 26 00:30:04 crc kubenswrapper[4875]: I1126 00:30:04.963147 4875 reconciler_common.go:293] "Volume detached for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/36f7da7c-5954-4c03-8cf5-01df9cfa3577-build-blob-cache\") on node \"crc\" DevicePath \"\"" Nov 26 00:30:04 crc kubenswrapper[4875]: I1126 00:30:04.963157 4875 reconciler_common.go:293] "Volume detached for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/36f7da7c-5954-4c03-8cf5-01df9cfa3577-builder-dockercfg-5d5px-pull\") on node \"crc\" DevicePath \"\"" Nov 26 00:30:04 crc kubenswrapper[4875]: I1126 00:30:04.963165 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-msvjf\" (UniqueName: \"kubernetes.io/projected/36f7da7c-5954-4c03-8cf5-01df9cfa3577-kube-api-access-msvjf\") on node \"crc\" DevicePath \"\"" Nov 26 00:30:05 crc kubenswrapper[4875]: I1126 00:30:05.164947 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/36f7da7c-5954-4c03-8cf5-01df9cfa3577-container-storage-root" (OuterVolumeSpecName: "container-storage-root") pod "36f7da7c-5954-4c03-8cf5-01df9cfa3577" (UID: "36f7da7c-5954-4c03-8cf5-01df9cfa3577"). InnerVolumeSpecName "container-storage-root". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:30:05 crc kubenswrapper[4875]: I1126 00:30:05.165383 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/36f7da7c-5954-4c03-8cf5-01df9cfa3577-container-storage-root\") pod \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\" (UID: \"36f7da7c-5954-4c03-8cf5-01df9cfa3577\") " Nov 26 00:30:05 crc kubenswrapper[4875]: W1126 00:30:05.165555 4875 empty_dir.go:500] Warning: Unmount skipped because path does not exist: /var/lib/kubelet/pods/36f7da7c-5954-4c03-8cf5-01df9cfa3577/volumes/kubernetes.io~empty-dir/container-storage-root Nov 26 00:30:05 crc kubenswrapper[4875]: I1126 00:30:05.165588 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/36f7da7c-5954-4c03-8cf5-01df9cfa3577-container-storage-root" (OuterVolumeSpecName: "container-storage-root") pod "36f7da7c-5954-4c03-8cf5-01df9cfa3577" (UID: "36f7da7c-5954-4c03-8cf5-01df9cfa3577"). InnerVolumeSpecName "container-storage-root". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:30:05 crc kubenswrapper[4875]: I1126 00:30:05.165894 4875 reconciler_common.go:293] "Volume detached for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/36f7da7c-5954-4c03-8cf5-01df9cfa3577-container-storage-root\") on node \"crc\" DevicePath \"\"" Nov 26 00:30:05 crc kubenswrapper[4875]: I1126 00:30:05.239425 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/service-telemetry_prometheus-webhook-snmp-1-build_36f7da7c-5954-4c03-8cf5-01df9cfa3577/docker-build/0.log" Nov 26 00:30:05 crc kubenswrapper[4875]: I1126 00:30:05.240287 4875 generic.go:334] "Generic (PLEG): container finished" podID="36f7da7c-5954-4c03-8cf5-01df9cfa3577" containerID="79e3607c207678e89484a64254995eafa930be8a2b80beb67e31601206d7fb26" exitCode=1 Nov 26 00:30:05 crc kubenswrapper[4875]: I1126 00:30:05.240341 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="service-telemetry/prometheus-webhook-snmp-1-build" Nov 26 00:30:05 crc kubenswrapper[4875]: I1126 00:30:05.240378 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/prometheus-webhook-snmp-1-build" event={"ID":"36f7da7c-5954-4c03-8cf5-01df9cfa3577","Type":"ContainerDied","Data":"79e3607c207678e89484a64254995eafa930be8a2b80beb67e31601206d7fb26"} Nov 26 00:30:05 crc kubenswrapper[4875]: I1126 00:30:05.240750 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/prometheus-webhook-snmp-1-build" event={"ID":"36f7da7c-5954-4c03-8cf5-01df9cfa3577","Type":"ContainerDied","Data":"14c73ed6852acfb21cfcb2cba0f9c71787ee665baedd76afeabeb4d4ad52188c"} Nov 26 00:30:05 crc kubenswrapper[4875]: I1126 00:30:05.240778 4875 scope.go:117] "RemoveContainer" containerID="79e3607c207678e89484a64254995eafa930be8a2b80beb67e31601206d7fb26" Nov 26 00:30:05 crc kubenswrapper[4875]: I1126 00:30:05.258110 4875 scope.go:117] "RemoveContainer" containerID="0136e3b4b34ae835d92ec71e62ac1563a6d81a1be0474d2e215c7cc82e3ea391" Nov 26 00:30:05 crc kubenswrapper[4875]: I1126 00:30:05.278483 4875 scope.go:117] "RemoveContainer" containerID="79e3607c207678e89484a64254995eafa930be8a2b80beb67e31601206d7fb26" Nov 26 00:30:05 crc kubenswrapper[4875]: E1126 00:30:05.281733 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"79e3607c207678e89484a64254995eafa930be8a2b80beb67e31601206d7fb26\": container with ID starting with 79e3607c207678e89484a64254995eafa930be8a2b80beb67e31601206d7fb26 not found: ID does not exist" containerID="79e3607c207678e89484a64254995eafa930be8a2b80beb67e31601206d7fb26" Nov 26 00:30:05 crc kubenswrapper[4875]: I1126 00:30:05.281771 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"79e3607c207678e89484a64254995eafa930be8a2b80beb67e31601206d7fb26"} err="failed to get container status \"79e3607c207678e89484a64254995eafa930be8a2b80beb67e31601206d7fb26\": rpc error: code = NotFound desc = could not find container \"79e3607c207678e89484a64254995eafa930be8a2b80beb67e31601206d7fb26\": container with ID starting with 79e3607c207678e89484a64254995eafa930be8a2b80beb67e31601206d7fb26 not found: ID does not exist" Nov 26 00:30:05 crc kubenswrapper[4875]: I1126 00:30:05.281795 4875 scope.go:117] "RemoveContainer" containerID="0136e3b4b34ae835d92ec71e62ac1563a6d81a1be0474d2e215c7cc82e3ea391" Nov 26 00:30:05 crc kubenswrapper[4875]: E1126 00:30:05.282755 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0136e3b4b34ae835d92ec71e62ac1563a6d81a1be0474d2e215c7cc82e3ea391\": container with ID starting with 0136e3b4b34ae835d92ec71e62ac1563a6d81a1be0474d2e215c7cc82e3ea391 not found: ID does not exist" containerID="0136e3b4b34ae835d92ec71e62ac1563a6d81a1be0474d2e215c7cc82e3ea391" Nov 26 00:30:05 crc kubenswrapper[4875]: I1126 00:30:05.282797 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0136e3b4b34ae835d92ec71e62ac1563a6d81a1be0474d2e215c7cc82e3ea391"} err="failed to get container status \"0136e3b4b34ae835d92ec71e62ac1563a6d81a1be0474d2e215c7cc82e3ea391\": rpc error: code = NotFound desc = could not find container \"0136e3b4b34ae835d92ec71e62ac1563a6d81a1be0474d2e215c7cc82e3ea391\": container with ID starting with 0136e3b4b34ae835d92ec71e62ac1563a6d81a1be0474d2e215c7cc82e3ea391 not found: ID does not 
exist" Nov 26 00:30:05 crc kubenswrapper[4875]: I1126 00:30:05.289506 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["service-telemetry/prometheus-webhook-snmp-1-build"] Nov 26 00:30:05 crc kubenswrapper[4875]: I1126 00:30:05.291458 4875 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["service-telemetry/prometheus-webhook-snmp-1-build"] Nov 26 00:30:05 crc kubenswrapper[4875]: I1126 00:30:05.537201 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="36f7da7c-5954-4c03-8cf5-01df9cfa3577" path="/var/lib/kubelet/pods/36f7da7c-5954-4c03-8cf5-01df9cfa3577/volumes" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.072110 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["service-telemetry/prometheus-webhook-snmp-2-build"] Nov 26 00:30:06 crc kubenswrapper[4875]: E1126 00:30:06.072372 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36f7da7c-5954-4c03-8cf5-01df9cfa3577" containerName="docker-build" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.072387 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="36f7da7c-5954-4c03-8cf5-01df9cfa3577" containerName="docker-build" Nov 26 00:30:06 crc kubenswrapper[4875]: E1126 00:30:06.072399 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="8c38ffff-58a1-4661-9237-df3cf6241fda" containerName="collect-profiles" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.072423 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="8c38ffff-58a1-4661-9237-df3cf6241fda" containerName="collect-profiles" Nov 26 00:30:06 crc kubenswrapper[4875]: E1126 00:30:06.072447 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="36f7da7c-5954-4c03-8cf5-01df9cfa3577" containerName="manage-dockerfile" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.072455 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="36f7da7c-5954-4c03-8cf5-01df9cfa3577" containerName="manage-dockerfile" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.072592 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="8c38ffff-58a1-4661-9237-df3cf6241fda" containerName="collect-profiles" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.072606 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="36f7da7c-5954-4c03-8cf5-01df9cfa3577" containerName="docker-build" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.074199 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="service-telemetry/prometheus-webhook-snmp-2-build" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.081033 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"service-telemetry"/"prometheus-webhook-snmp-2-global-ca" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.081261 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"service-telemetry"/"prometheus-webhook-snmp-2-sys-config" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.081043 4875 reflector.go:368] Caches populated for *v1.Secret from object-"service-telemetry"/"builder-dockercfg-5d5px" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.082610 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"service-telemetry"/"prometheus-webhook-snmp-2-ca" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.093647 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["service-telemetry/prometheus-webhook-snmp-2-build"] Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.178600 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/650295c4-b8e4-4036-8245-441b299b5aac-buildworkdir\") pod \"prometheus-webhook-snmp-2-build\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " pod="service-telemetry/prometheus-webhook-snmp-2-build" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.178814 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/650295c4-b8e4-4036-8245-441b299b5aac-build-system-configs\") pod \"prometheus-webhook-snmp-2-build\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " pod="service-telemetry/prometheus-webhook-snmp-2-build" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.178849 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/650295c4-b8e4-4036-8245-441b299b5aac-build-ca-bundles\") pod \"prometheus-webhook-snmp-2-build\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " pod="service-telemetry/prometheus-webhook-snmp-2-build" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.178908 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/650295c4-b8e4-4036-8245-441b299b5aac-container-storage-run\") pod \"prometheus-webhook-snmp-2-build\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " pod="service-telemetry/prometheus-webhook-snmp-2-build" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.179034 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/650295c4-b8e4-4036-8245-441b299b5aac-buildcachedir\") pod \"prometheus-webhook-snmp-2-build\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " pod="service-telemetry/prometheus-webhook-snmp-2-build" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.179109 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/650295c4-b8e4-4036-8245-441b299b5aac-build-blob-cache\") pod \"prometheus-webhook-snmp-2-build\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " 
pod="service-telemetry/prometheus-webhook-snmp-2-build" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.179137 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/650295c4-b8e4-4036-8245-441b299b5aac-container-storage-root\") pod \"prometheus-webhook-snmp-2-build\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " pod="service-telemetry/prometheus-webhook-snmp-2-build" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.179157 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-jj87p\" (UniqueName: \"kubernetes.io/projected/650295c4-b8e4-4036-8245-441b299b5aac-kube-api-access-jj87p\") pod \"prometheus-webhook-snmp-2-build\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " pod="service-telemetry/prometheus-webhook-snmp-2-build" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.179258 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/650295c4-b8e4-4036-8245-441b299b5aac-builder-dockercfg-5d5px-push\") pod \"prometheus-webhook-snmp-2-build\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " pod="service-telemetry/prometheus-webhook-snmp-2-build" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.179361 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/650295c4-b8e4-4036-8245-441b299b5aac-builder-dockercfg-5d5px-pull\") pod \"prometheus-webhook-snmp-2-build\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " pod="service-telemetry/prometheus-webhook-snmp-2-build" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.179399 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/650295c4-b8e4-4036-8245-441b299b5aac-node-pullsecrets\") pod \"prometheus-webhook-snmp-2-build\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " pod="service-telemetry/prometheus-webhook-snmp-2-build" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.179472 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/650295c4-b8e4-4036-8245-441b299b5aac-build-proxy-ca-bundles\") pod \"prometheus-webhook-snmp-2-build\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " pod="service-telemetry/prometheus-webhook-snmp-2-build" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.280492 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/650295c4-b8e4-4036-8245-441b299b5aac-build-system-configs\") pod \"prometheus-webhook-snmp-2-build\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " pod="service-telemetry/prometheus-webhook-snmp-2-build" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.280542 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/650295c4-b8e4-4036-8245-441b299b5aac-build-ca-bundles\") pod \"prometheus-webhook-snmp-2-build\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " pod="service-telemetry/prometheus-webhook-snmp-2-build" Nov 26 00:30:06 crc kubenswrapper[4875]: 
I1126 00:30:06.280575 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/650295c4-b8e4-4036-8245-441b299b5aac-container-storage-run\") pod \"prometheus-webhook-snmp-2-build\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " pod="service-telemetry/prometheus-webhook-snmp-2-build" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.280604 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/650295c4-b8e4-4036-8245-441b299b5aac-buildcachedir\") pod \"prometheus-webhook-snmp-2-build\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " pod="service-telemetry/prometheus-webhook-snmp-2-build" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.280630 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/650295c4-b8e4-4036-8245-441b299b5aac-build-blob-cache\") pod \"prometheus-webhook-snmp-2-build\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " pod="service-telemetry/prometheus-webhook-snmp-2-build" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.280653 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/650295c4-b8e4-4036-8245-441b299b5aac-container-storage-root\") pod \"prometheus-webhook-snmp-2-build\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " pod="service-telemetry/prometheus-webhook-snmp-2-build" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.280670 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-jj87p\" (UniqueName: \"kubernetes.io/projected/650295c4-b8e4-4036-8245-441b299b5aac-kube-api-access-jj87p\") pod \"prometheus-webhook-snmp-2-build\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " pod="service-telemetry/prometheus-webhook-snmp-2-build" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.280692 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/650295c4-b8e4-4036-8245-441b299b5aac-builder-dockercfg-5d5px-push\") pod \"prometheus-webhook-snmp-2-build\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " pod="service-telemetry/prometheus-webhook-snmp-2-build" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.280705 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/650295c4-b8e4-4036-8245-441b299b5aac-builder-dockercfg-5d5px-pull\") pod \"prometheus-webhook-snmp-2-build\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " pod="service-telemetry/prometheus-webhook-snmp-2-build" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.280738 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/650295c4-b8e4-4036-8245-441b299b5aac-node-pullsecrets\") pod \"prometheus-webhook-snmp-2-build\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " pod="service-telemetry/prometheus-webhook-snmp-2-build" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.280758 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"build-proxy-ca-bundles\" (UniqueName: 
\"kubernetes.io/configmap/650295c4-b8e4-4036-8245-441b299b5aac-build-proxy-ca-bundles\") pod \"prometheus-webhook-snmp-2-build\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " pod="service-telemetry/prometheus-webhook-snmp-2-build" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.280779 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/650295c4-b8e4-4036-8245-441b299b5aac-buildworkdir\") pod \"prometheus-webhook-snmp-2-build\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " pod="service-telemetry/prometheus-webhook-snmp-2-build" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.281178 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/650295c4-b8e4-4036-8245-441b299b5aac-buildworkdir\") pod \"prometheus-webhook-snmp-2-build\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " pod="service-telemetry/prometheus-webhook-snmp-2-build" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.281640 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/650295c4-b8e4-4036-8245-441b299b5aac-build-system-configs\") pod \"prometheus-webhook-snmp-2-build\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " pod="service-telemetry/prometheus-webhook-snmp-2-build" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.282323 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/650295c4-b8e4-4036-8245-441b299b5aac-build-ca-bundles\") pod \"prometheus-webhook-snmp-2-build\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " pod="service-telemetry/prometheus-webhook-snmp-2-build" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.282600 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/650295c4-b8e4-4036-8245-441b299b5aac-container-storage-run\") pod \"prometheus-webhook-snmp-2-build\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " pod="service-telemetry/prometheus-webhook-snmp-2-build" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.282645 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/650295c4-b8e4-4036-8245-441b299b5aac-buildcachedir\") pod \"prometheus-webhook-snmp-2-build\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " pod="service-telemetry/prometheus-webhook-snmp-2-build" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.282805 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/650295c4-b8e4-4036-8245-441b299b5aac-build-blob-cache\") pod \"prometheus-webhook-snmp-2-build\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " pod="service-telemetry/prometheus-webhook-snmp-2-build" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.283025 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/650295c4-b8e4-4036-8245-441b299b5aac-container-storage-root\") pod \"prometheus-webhook-snmp-2-build\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " pod="service-telemetry/prometheus-webhook-snmp-2-build" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.283873 4875 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/650295c4-b8e4-4036-8245-441b299b5aac-node-pullsecrets\") pod \"prometheus-webhook-snmp-2-build\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " pod="service-telemetry/prometheus-webhook-snmp-2-build" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.284428 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/650295c4-b8e4-4036-8245-441b299b5aac-build-proxy-ca-bundles\") pod \"prometheus-webhook-snmp-2-build\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " pod="service-telemetry/prometheus-webhook-snmp-2-build" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.287561 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/650295c4-b8e4-4036-8245-441b299b5aac-builder-dockercfg-5d5px-push\") pod \"prometheus-webhook-snmp-2-build\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " pod="service-telemetry/prometheus-webhook-snmp-2-build" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.287601 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/650295c4-b8e4-4036-8245-441b299b5aac-builder-dockercfg-5d5px-pull\") pod \"prometheus-webhook-snmp-2-build\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " pod="service-telemetry/prometheus-webhook-snmp-2-build" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.298380 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-jj87p\" (UniqueName: \"kubernetes.io/projected/650295c4-b8e4-4036-8245-441b299b5aac-kube-api-access-jj87p\") pod \"prometheus-webhook-snmp-2-build\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " pod="service-telemetry/prometheus-webhook-snmp-2-build" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.394105 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="service-telemetry/prometheus-webhook-snmp-2-build" Nov 26 00:30:06 crc kubenswrapper[4875]: I1126 00:30:06.810469 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["service-telemetry/prometheus-webhook-snmp-2-build"] Nov 26 00:30:07 crc kubenswrapper[4875]: I1126 00:30:07.254105 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/prometheus-webhook-snmp-2-build" event={"ID":"650295c4-b8e4-4036-8245-441b299b5aac","Type":"ContainerStarted","Data":"37e2e0980a8fe0dd499e1879a854ecde108f98b12804b31d7d170d490c05938d"} Nov 26 00:30:07 crc kubenswrapper[4875]: I1126 00:30:07.254463 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/prometheus-webhook-snmp-2-build" event={"ID":"650295c4-b8e4-4036-8245-441b299b5aac","Type":"ContainerStarted","Data":"4a3c12665004c51d77b78385c7d531378331fc0d546d104801e9e57c4d6ede29"} Nov 26 00:30:08 crc kubenswrapper[4875]: I1126 00:30:08.264100 4875 generic.go:334] "Generic (PLEG): container finished" podID="650295c4-b8e4-4036-8245-441b299b5aac" containerID="37e2e0980a8fe0dd499e1879a854ecde108f98b12804b31d7d170d490c05938d" exitCode=0 Nov 26 00:30:08 crc kubenswrapper[4875]: I1126 00:30:08.264197 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/prometheus-webhook-snmp-2-build" event={"ID":"650295c4-b8e4-4036-8245-441b299b5aac","Type":"ContainerDied","Data":"37e2e0980a8fe0dd499e1879a854ecde108f98b12804b31d7d170d490c05938d"} Nov 26 00:30:09 crc kubenswrapper[4875]: I1126 00:30:09.272605 4875 generic.go:334] "Generic (PLEG): container finished" podID="650295c4-b8e4-4036-8245-441b299b5aac" containerID="32a00606949aba2e3778e8c33d7b3d44b05cbbc5803144b39286891fa23f1555" exitCode=0 Nov 26 00:30:09 crc kubenswrapper[4875]: I1126 00:30:09.272716 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/prometheus-webhook-snmp-2-build" event={"ID":"650295c4-b8e4-4036-8245-441b299b5aac","Type":"ContainerDied","Data":"32a00606949aba2e3778e8c33d7b3d44b05cbbc5803144b39286891fa23f1555"} Nov 26 00:30:09 crc kubenswrapper[4875]: I1126 00:30:09.321221 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/service-telemetry_prometheus-webhook-snmp-2-build_650295c4-b8e4-4036-8245-441b299b5aac/manage-dockerfile/0.log" Nov 26 00:30:10 crc kubenswrapper[4875]: I1126 00:30:10.281295 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/prometheus-webhook-snmp-2-build" event={"ID":"650295c4-b8e4-4036-8245-441b299b5aac","Type":"ContainerStarted","Data":"c0eef9a70b2d5a1642372aabf0acebb86c0a1c2b67c2a93f2305b5695ce948a3"} Nov 26 00:30:10 crc kubenswrapper[4875]: I1126 00:30:10.313743 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="service-telemetry/prometheus-webhook-snmp-2-build" podStartSLOduration=4.313717541 podStartE2EDuration="4.313717541s" podCreationTimestamp="2025-11-26 00:30:06 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:30:10.309942559 +0000 UTC m=+1383.499918254" watchObservedRunningTime="2025-11-26 00:30:10.313717541 +0000 UTC m=+1383.503693256" Nov 26 00:30:13 crc kubenswrapper[4875]: I1126 00:30:13.892840 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 26 00:30:13 crc kubenswrapper[4875]: I1126 00:30:13.894038 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 00:30:13 crc kubenswrapper[4875]: I1126 00:30:13.900079 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 26 00:30:13 crc kubenswrapper[4875]: I1126 00:30:13.900126 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 26 00:30:13 crc kubenswrapper[4875]: I1126 00:30:13.900994 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 26 00:30:13 crc kubenswrapper[4875]: I1126 00:30:13.980718 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/862039a4-64b5-4c36-b5e0-1046d2061b90-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"862039a4-64b5-4c36-b5e0-1046d2061b90\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 00:30:13 crc kubenswrapper[4875]: I1126 00:30:13.980903 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/862039a4-64b5-4c36-b5e0-1046d2061b90-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"862039a4-64b5-4c36-b5e0-1046d2061b90\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 00:30:14 crc kubenswrapper[4875]: I1126 00:30:14.081860 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/862039a4-64b5-4c36-b5e0-1046d2061b90-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"862039a4-64b5-4c36-b5e0-1046d2061b90\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 00:30:14 crc kubenswrapper[4875]: I1126 00:30:14.081940 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/862039a4-64b5-4c36-b5e0-1046d2061b90-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"862039a4-64b5-4c36-b5e0-1046d2061b90\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 00:30:14 crc kubenswrapper[4875]: I1126 00:30:14.082068 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/862039a4-64b5-4c36-b5e0-1046d2061b90-kubelet-dir\") pod \"revision-pruner-9-crc\" (UID: \"862039a4-64b5-4c36-b5e0-1046d2061b90\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 00:30:14 crc kubenswrapper[4875]: I1126 00:30:14.103336 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/862039a4-64b5-4c36-b5e0-1046d2061b90-kube-api-access\") pod \"revision-pruner-9-crc\" (UID: \"862039a4-64b5-4c36-b5e0-1046d2061b90\") " pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 00:30:14 crc kubenswrapper[4875]: I1126 00:30:14.212566 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 00:30:14 crc kubenswrapper[4875]: I1126 00:30:14.429643 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/revision-pruner-9-crc"] Nov 26 00:30:14 crc kubenswrapper[4875]: W1126 00:30:14.436290 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-pod862039a4_64b5_4c36_b5e0_1046d2061b90.slice/crio-5e89576e015362f132b92265eec661752b59571cbd27302fad09b43901c5041f WatchSource:0}: Error finding container 5e89576e015362f132b92265eec661752b59571cbd27302fad09b43901c5041f: Status 404 returned error can't find the container with id 5e89576e015362f132b92265eec661752b59571cbd27302fad09b43901c5041f Nov 26 00:30:15 crc kubenswrapper[4875]: I1126 00:30:15.311490 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"862039a4-64b5-4c36-b5e0-1046d2061b90","Type":"ContainerStarted","Data":"54cb82746f41fd74a85b5a068fb685bec625371dad43a845f45596b5029227cc"} Nov 26 00:30:15 crc kubenswrapper[4875]: I1126 00:30:15.311962 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"862039a4-64b5-4c36-b5e0-1046d2061b90","Type":"ContainerStarted","Data":"5e89576e015362f132b92265eec661752b59571cbd27302fad09b43901c5041f"} Nov 26 00:30:16 crc kubenswrapper[4875]: I1126 00:30:16.318757 4875 generic.go:334] "Generic (PLEG): container finished" podID="862039a4-64b5-4c36-b5e0-1046d2061b90" containerID="54cb82746f41fd74a85b5a068fb685bec625371dad43a845f45596b5029227cc" exitCode=0 Nov 26 00:30:16 crc kubenswrapper[4875]: I1126 00:30:16.318873 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"862039a4-64b5-4c36-b5e0-1046d2061b90","Type":"ContainerDied","Data":"54cb82746f41fd74a85b5a068fb685bec625371dad43a845f45596b5029227cc"} Nov 26 00:30:17 crc kubenswrapper[4875]: I1126 00:30:17.537306 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 00:30:17 crc kubenswrapper[4875]: I1126 00:30:17.728259 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/862039a4-64b5-4c36-b5e0-1046d2061b90-kube-api-access\") pod \"862039a4-64b5-4c36-b5e0-1046d2061b90\" (UID: \"862039a4-64b5-4c36-b5e0-1046d2061b90\") " Nov 26 00:30:17 crc kubenswrapper[4875]: I1126 00:30:17.728319 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/862039a4-64b5-4c36-b5e0-1046d2061b90-kubelet-dir\") pod \"862039a4-64b5-4c36-b5e0-1046d2061b90\" (UID: \"862039a4-64b5-4c36-b5e0-1046d2061b90\") " Nov 26 00:30:17 crc kubenswrapper[4875]: I1126 00:30:17.728487 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/862039a4-64b5-4c36-b5e0-1046d2061b90-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "862039a4-64b5-4c36-b5e0-1046d2061b90" (UID: "862039a4-64b5-4c36-b5e0-1046d2061b90"). InnerVolumeSpecName "kubelet-dir". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:30:17 crc kubenswrapper[4875]: I1126 00:30:17.728825 4875 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/862039a4-64b5-4c36-b5e0-1046d2061b90-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 26 00:30:17 crc kubenswrapper[4875]: I1126 00:30:17.733919 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/862039a4-64b5-4c36-b5e0-1046d2061b90-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "862039a4-64b5-4c36-b5e0-1046d2061b90" (UID: "862039a4-64b5-4c36-b5e0-1046d2061b90"). InnerVolumeSpecName "kube-api-access". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:30:17 crc kubenswrapper[4875]: I1126 00:30:17.829607 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/862039a4-64b5-4c36-b5e0-1046d2061b90-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 00:30:18 crc kubenswrapper[4875]: I1126 00:30:18.333887 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/revision-pruner-9-crc" Nov 26 00:30:18 crc kubenswrapper[4875]: I1126 00:30:18.333867 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/revision-pruner-9-crc" event={"ID":"862039a4-64b5-4c36-b5e0-1046d2061b90","Type":"ContainerDied","Data":"5e89576e015362f132b92265eec661752b59571cbd27302fad09b43901c5041f"} Nov 26 00:30:18 crc kubenswrapper[4875]: I1126 00:30:18.334346 4875 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="5e89576e015362f132b92265eec661752b59571cbd27302fad09b43901c5041f" Nov 26 00:30:20 crc kubenswrapper[4875]: I1126 00:30:20.107743 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 26 00:30:20 crc kubenswrapper[4875]: E1126 00:30:20.108068 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="862039a4-64b5-4c36-b5e0-1046d2061b90" containerName="pruner" Nov 26 00:30:20 crc kubenswrapper[4875]: I1126 00:30:20.108085 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="862039a4-64b5-4c36-b5e0-1046d2061b90" containerName="pruner" Nov 26 00:30:20 crc kubenswrapper[4875]: I1126 00:30:20.108216 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="862039a4-64b5-4c36-b5e0-1046d2061b90" containerName="pruner" Nov 26 00:30:20 crc kubenswrapper[4875]: I1126 00:30:20.108762 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 26 00:30:20 crc kubenswrapper[4875]: I1126 00:30:20.113186 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver"/"installer-sa-dockercfg-5pr6n" Nov 26 00:30:20 crc kubenswrapper[4875]: I1126 00:30:20.113212 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver"/"kube-root-ca.crt" Nov 26 00:30:20 crc kubenswrapper[4875]: I1126 00:30:20.117214 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 26 00:30:20 crc kubenswrapper[4875]: I1126 00:30:20.256143 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c-kube-api-access\") pod \"installer-9-crc\" (UID: \"4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 00:30:20 crc kubenswrapper[4875]: I1126 00:30:20.256188 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c-kubelet-dir\") pod \"installer-9-crc\" (UID: \"4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 00:30:20 crc kubenswrapper[4875]: I1126 00:30:20.256207 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c-var-lock\") pod \"installer-9-crc\" (UID: \"4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 00:30:20 crc kubenswrapper[4875]: I1126 00:30:20.357200 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c-kubelet-dir\") pod \"installer-9-crc\" (UID: \"4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 00:30:20 crc kubenswrapper[4875]: I1126 00:30:20.357240 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c-kube-api-access\") pod \"installer-9-crc\" (UID: \"4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 00:30:20 crc kubenswrapper[4875]: I1126 00:30:20.357261 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c-var-lock\") pod \"installer-9-crc\" (UID: \"4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 00:30:20 crc kubenswrapper[4875]: I1126 00:30:20.357319 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c-kubelet-dir\") pod \"installer-9-crc\" (UID: \"4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 00:30:20 crc kubenswrapper[4875]: I1126 00:30:20.357369 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c-var-lock\") pod \"installer-9-crc\" (UID: 
\"4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 00:30:20 crc kubenswrapper[4875]: I1126 00:30:20.382363 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c-kube-api-access\") pod \"installer-9-crc\" (UID: \"4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c\") " pod="openshift-kube-apiserver/installer-9-crc" Nov 26 00:30:20 crc kubenswrapper[4875]: I1126 00:30:20.437591 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 26 00:30:20 crc kubenswrapper[4875]: I1126 00:30:20.642021 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-kube-apiserver/installer-9-crc"] Nov 26 00:30:21 crc kubenswrapper[4875]: I1126 00:30:21.350891 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c","Type":"ContainerStarted","Data":"49200f3c697d2c58a58440a9a654a16e4e99a94d26f67a1963fb2145529cbb5c"} Nov 26 00:30:21 crc kubenswrapper[4875]: I1126 00:30:21.351203 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c","Type":"ContainerStarted","Data":"554215c11658cec7f32858b049e5096ea29645d86e4c3193eee9cc7f02f29238"} Nov 26 00:30:21 crc kubenswrapper[4875]: I1126 00:30:21.367802 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/installer-9-crc" podStartSLOduration=1.367781135 podStartE2EDuration="1.367781135s" podCreationTimestamp="2025-11-26 00:30:20 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:30:21.363595852 +0000 UTC m=+1394.553571547" watchObservedRunningTime="2025-11-26 00:30:21.367781135 +0000 UTC m=+1394.557756830" Nov 26 00:30:56 crc kubenswrapper[4875]: I1126 00:30:56.774613 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/certified-operators-546g6"] Nov 26 00:30:56 crc kubenswrapper[4875]: I1126 00:30:56.776530 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/certified-operators-546g6" Nov 26 00:30:56 crc kubenswrapper[4875]: I1126 00:30:56.785706 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-546g6"] Nov 26 00:30:56 crc kubenswrapper[4875]: I1126 00:30:56.926073 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-tct6s\" (UniqueName: \"kubernetes.io/projected/8703e7d9-1478-4664-90fa-0980c97ccbec-kube-api-access-tct6s\") pod \"certified-operators-546g6\" (UID: \"8703e7d9-1478-4664-90fa-0980c97ccbec\") " pod="openshift-marketplace/certified-operators-546g6" Nov 26 00:30:56 crc kubenswrapper[4875]: I1126 00:30:56.926202 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8703e7d9-1478-4664-90fa-0980c97ccbec-catalog-content\") pod \"certified-operators-546g6\" (UID: \"8703e7d9-1478-4664-90fa-0980c97ccbec\") " pod="openshift-marketplace/certified-operators-546g6" Nov 26 00:30:56 crc kubenswrapper[4875]: I1126 00:30:56.926266 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8703e7d9-1478-4664-90fa-0980c97ccbec-utilities\") pod \"certified-operators-546g6\" (UID: \"8703e7d9-1478-4664-90fa-0980c97ccbec\") " pod="openshift-marketplace/certified-operators-546g6" Nov 26 00:30:57 crc kubenswrapper[4875]: I1126 00:30:57.028004 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8703e7d9-1478-4664-90fa-0980c97ccbec-catalog-content\") pod \"certified-operators-546g6\" (UID: \"8703e7d9-1478-4664-90fa-0980c97ccbec\") " pod="openshift-marketplace/certified-operators-546g6" Nov 26 00:30:57 crc kubenswrapper[4875]: I1126 00:30:57.028064 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8703e7d9-1478-4664-90fa-0980c97ccbec-utilities\") pod \"certified-operators-546g6\" (UID: \"8703e7d9-1478-4664-90fa-0980c97ccbec\") " pod="openshift-marketplace/certified-operators-546g6" Nov 26 00:30:57 crc kubenswrapper[4875]: I1126 00:30:57.028091 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-tct6s\" (UniqueName: \"kubernetes.io/projected/8703e7d9-1478-4664-90fa-0980c97ccbec-kube-api-access-tct6s\") pod \"certified-operators-546g6\" (UID: \"8703e7d9-1478-4664-90fa-0980c97ccbec\") " pod="openshift-marketplace/certified-operators-546g6" Nov 26 00:30:57 crc kubenswrapper[4875]: I1126 00:30:57.028587 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8703e7d9-1478-4664-90fa-0980c97ccbec-catalog-content\") pod \"certified-operators-546g6\" (UID: \"8703e7d9-1478-4664-90fa-0980c97ccbec\") " pod="openshift-marketplace/certified-operators-546g6" Nov 26 00:30:57 crc kubenswrapper[4875]: I1126 00:30:57.028621 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8703e7d9-1478-4664-90fa-0980c97ccbec-utilities\") pod \"certified-operators-546g6\" (UID: \"8703e7d9-1478-4664-90fa-0980c97ccbec\") " pod="openshift-marketplace/certified-operators-546g6" Nov 26 00:30:57 crc kubenswrapper[4875]: I1126 00:30:57.051988 4875 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-tct6s\" (UniqueName: \"kubernetes.io/projected/8703e7d9-1478-4664-90fa-0980c97ccbec-kube-api-access-tct6s\") pod \"certified-operators-546g6\" (UID: \"8703e7d9-1478-4664-90fa-0980c97ccbec\") " pod="openshift-marketplace/certified-operators-546g6" Nov 26 00:30:57 crc kubenswrapper[4875]: I1126 00:30:57.093224 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-546g6" Nov 26 00:30:57 crc kubenswrapper[4875]: I1126 00:30:57.577400 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/certified-operators-546g6"] Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.541051 4875 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.542317 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.550054 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.550117 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.550214 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.550269 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.550355 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.551065 4875 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.551518 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" 
podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" containerID="cri-o://66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2" gracePeriod=15 Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.551614 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" containerID="cri-o://cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5" gracePeriod=15 Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.551742 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" containerID="cri-o://2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0" gracePeriod=15 Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.551756 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" containerID="cri-o://4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec" gracePeriod=15 Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.551779 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" containerID="cri-o://ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7" gracePeriod=15 Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.555821 4875 kubelet.go:2421] "SyncLoop ADD" source="file" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 26 00:30:58 crc kubenswrapper[4875]: E1126 00:30:58.558192 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.558221 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 26 00:30:58 crc kubenswrapper[4875]: E1126 00:30:58.558241 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.558249 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="setup" Nov 26 00:30:58 crc kubenswrapper[4875]: E1126 00:30:58.558260 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.558268 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 26 00:30:58 crc kubenswrapper[4875]: E1126 00:30:58.558279 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.558287 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 26 00:30:58 crc kubenswrapper[4875]: 
E1126 00:30:58.558340 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.558349 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 26 00:30:58 crc kubenswrapper[4875]: E1126 00:30:58.558360 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.558368 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 26 00:30:58 crc kubenswrapper[4875]: E1126 00:30:58.558399 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.558423 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.558583 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver" Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.558599 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.558612 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-insecure-readyz" Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.558953 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-check-endpoints" Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.558970 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-regeneration-controller" Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.558980 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="f4b27818a5e8e43d0dc095d08835c792" containerName="kube-apiserver-cert-syncer" Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.574150 4875 generic.go:334] "Generic (PLEG): container finished" podID="8703e7d9-1478-4664-90fa-0980c97ccbec" containerID="3b8bbb67d3dfdba8e80fa8e3070c24fb5f146242f37421052622b0dbaa1edabc" exitCode=0 Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.574554 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-546g6" event={"ID":"8703e7d9-1478-4664-90fa-0980c97ccbec","Type":"ContainerDied","Data":"3b8bbb67d3dfdba8e80fa8e3070c24fb5f146242f37421052622b0dbaa1edabc"} Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.574628 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-546g6" event={"ID":"8703e7d9-1478-4664-90fa-0980c97ccbec","Type":"ContainerStarted","Data":"dc2e93f4e09909cc61bf30a8d2380a0b455450fa6033985d6d4803d1ccd9c7c4"} Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.576571 4875 provider.go:102] Refreshing cache for provider: *credentialprovider.defaultDockerConfigProvider Nov 26 00:30:58 crc 
kubenswrapper[4875]: I1126 00:30:58.579574 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.652322 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.652371 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.652422 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.652447 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.652467 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.652399 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.652515 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.652543 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.652508 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod 
\"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.652564 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.652618 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.652705 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.652863 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"kube-apiserver-startup-monitor-crc\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.753526 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.753582 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.753607 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.753659 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-resource-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.753681 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-cert-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " 
pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.753705 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/71bb4a3aecc4ba5b26c4b7318770ce13-audit-dir\") pod \"kube-apiserver-crc\" (UID: \"71bb4a3aecc4ba5b26c4b7318770ce13\") " pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 00:30:58 crc kubenswrapper[4875]: I1126 00:30:58.872505 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 00:30:58 crc kubenswrapper[4875]: W1126 00:30:58.888968 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-podf85e55b1a89d02b0cb034b1ea31ed45a.slice/crio-5c712b63337e2ac13f86b5ebccd6df0a4256596117b9fa0fa82e66ebee978c2e WatchSource:0}: Error finding container 5c712b63337e2ac13f86b5ebccd6df0a4256596117b9fa0fa82e66ebee978c2e: Status 404 returned error can't find the container with id 5c712b63337e2ac13f86b5ebccd6df0a4256596117b9fa0fa82e66ebee978c2e Nov 26 00:30:59 crc kubenswrapper[4875]: I1126 00:30:59.582675 4875 generic.go:334] "Generic (PLEG): container finished" podID="4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c" containerID="49200f3c697d2c58a58440a9a654a16e4e99a94d26f67a1963fb2145529cbb5c" exitCode=0 Nov 26 00:30:59 crc kubenswrapper[4875]: I1126 00:30:59.582787 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c","Type":"ContainerDied","Data":"49200f3c697d2c58a58440a9a654a16e4e99a94d26f67a1963fb2145529cbb5c"} Nov 26 00:30:59 crc kubenswrapper[4875]: I1126 00:30:59.584396 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"6579f652fc1a0ddc9835178f0d25afd6560cbfa0f63f67143a24f7628c16ee10"} Nov 26 00:30:59 crc kubenswrapper[4875]: I1126 00:30:59.584436 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" event={"ID":"f85e55b1a89d02b0cb034b1ea31ed45a","Type":"ContainerStarted","Data":"5c712b63337e2ac13f86b5ebccd6df0a4256596117b9fa0fa82e66ebee978c2e"} Nov 26 00:30:59 crc kubenswrapper[4875]: I1126 00:30:59.587186 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-check-endpoints/0.log" Nov 26 00:30:59 crc kubenswrapper[4875]: I1126 00:30:59.588332 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 26 00:30:59 crc kubenswrapper[4875]: I1126 00:30:59.588885 4875 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5" exitCode=0 Nov 26 00:30:59 crc kubenswrapper[4875]: I1126 00:30:59.588901 4875 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec" exitCode=0 Nov 26 00:30:59 crc kubenswrapper[4875]: I1126 00:30:59.588909 4875 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" 
containerID="ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7" exitCode=0 Nov 26 00:30:59 crc kubenswrapper[4875]: I1126 00:30:59.588917 4875 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0" exitCode=2 Nov 26 00:30:59 crc kubenswrapper[4875]: I1126 00:30:59.588970 4875 scope.go:117] "RemoveContainer" containerID="b11de8b8743bf400bfb7ebb8852c6acb7032818b04857808cc15b73405cd5738" Nov 26 00:30:59 crc kubenswrapper[4875]: I1126 00:30:59.595583 4875 generic.go:334] "Generic (PLEG): container finished" podID="8703e7d9-1478-4664-90fa-0980c97ccbec" containerID="bcb8f2fbb307e255f4b27231a137a72597ec171534574c16d7f6d50c71ce7f73" exitCode=0 Nov 26 00:30:59 crc kubenswrapper[4875]: I1126 00:30:59.595637 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-546g6" event={"ID":"8703e7d9-1478-4664-90fa-0980c97ccbec","Type":"ContainerDied","Data":"bcb8f2fbb307e255f4b27231a137a72597ec171534574c16d7f6d50c71ce7f73"} Nov 26 00:31:00 crc kubenswrapper[4875]: I1126 00:31:00.624038 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-546g6" event={"ID":"8703e7d9-1478-4664-90fa-0980c97ccbec","Type":"ContainerStarted","Data":"deea4738b64295297a7a551858bcde358d05536744d187a91eae429329c1f11a"} Nov 26 00:31:00 crc kubenswrapper[4875]: I1126 00:31:00.643532 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.017152 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.021849 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.022445 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.181950 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c-kube-api-access\") pod \"4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c\" (UID: \"4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c\") " Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.181994 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c-kubelet-dir\") pod \"4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c\" (UID: \"4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c\") " Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.182032 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.182103 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c-var-lock\") pod \"4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c\" (UID: \"4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c\") " Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.182134 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.182136 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c-kubelet-dir" (OuterVolumeSpecName: "kubelet-dir") pod "4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c" (UID: "4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c"). InnerVolumeSpecName "kubelet-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.182164 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") pod \"f4b27818a5e8e43d0dc095d08835c792\" (UID: \"f4b27818a5e8e43d0dc095d08835c792\") " Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.182172 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir" (OuterVolumeSpecName: "cert-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "cert-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.182194 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c-var-lock" (OuterVolumeSpecName: "var-lock") pod "4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c" (UID: "4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c"). InnerVolumeSpecName "var-lock". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.182214 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir" (OuterVolumeSpecName: "audit-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "audit-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.182301 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f4b27818a5e8e43d0dc095d08835c792" (UID: "f4b27818a5e8e43d0dc095d08835c792"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.183016 4875 reconciler_common.go:293] "Volume detached for volume \"cert-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-cert-dir\") on node \"crc\" DevicePath \"\"" Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.183041 4875 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c-var-lock\") on node \"crc\" DevicePath \"\"" Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.183055 4875 reconciler_common.go:293] "Volume detached for volume \"audit-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-audit-dir\") on node \"crc\" DevicePath \"\"" Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.183067 4875 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f4b27818a5e8e43d0dc095d08835c792-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.183078 4875 reconciler_common.go:293] "Volume detached for volume \"kubelet-dir\" (UniqueName: \"kubernetes.io/host-path/4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c-kubelet-dir\") on node \"crc\" DevicePath \"\"" Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.187618 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c-kube-api-access" (OuterVolumeSpecName: "kube-api-access") pod "4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c" (UID: "4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c"). InnerVolumeSpecName "kube-api-access". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.284583 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access\" (UniqueName: \"kubernetes.io/projected/4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c-kube-api-access\") on node \"crc\" DevicePath \"\"" Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.536826 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f4b27818a5e8e43d0dc095d08835c792" path="/var/lib/kubelet/pods/f4b27818a5e8e43d0dc095d08835c792/volumes" Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.651977 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/installer-9-crc" event={"ID":"4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c","Type":"ContainerDied","Data":"554215c11658cec7f32858b049e5096ea29645d86e4c3193eee9cc7f02f29238"} Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.652014 4875 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="554215c11658cec7f32858b049e5096ea29645d86e4c3193eee9cc7f02f29238" Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.652012 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-kube-apiserver/installer-9-crc" Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.655615 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-crc_f4b27818a5e8e43d0dc095d08835c792/kube-apiserver-cert-syncer/0.log" Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.656359 4875 generic.go:334] "Generic (PLEG): container finished" podID="f4b27818a5e8e43d0dc095d08835c792" containerID="66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2" exitCode=0 Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.656458 4875 scope.go:117] "RemoveContainer" containerID="cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5" Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.656529 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.681746 4875 scope.go:117] "RemoveContainer" containerID="4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec" Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.698219 4875 scope.go:117] "RemoveContainer" containerID="ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7" Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.712844 4875 scope.go:117] "RemoveContainer" containerID="2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0" Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.728967 4875 scope.go:117] "RemoveContainer" containerID="66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2" Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.746769 4875 scope.go:117] "RemoveContainer" containerID="f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654" Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.769295 4875 scope.go:117] "RemoveContainer" containerID="cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5" Nov 26 00:31:01 crc kubenswrapper[4875]: E1126 00:31:01.769761 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5\": container with ID starting with cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5 not found: ID does not exist" containerID="cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5" Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.769794 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5"} err="failed to get container status \"cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5\": rpc error: code = NotFound desc = could not find container \"cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5\": container with ID starting with cfa5eb528c4004040ec298401057dc88e2f181372b4c9b1a10129cbdd5e79ee5 not found: ID does not exist" Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.769815 4875 scope.go:117] "RemoveContainer" containerID="4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec" Nov 26 00:31:01 crc kubenswrapper[4875]: E1126 00:31:01.770106 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec\": container with ID starting with 4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec not found: ID does not exist" containerID="4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec" Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.770161 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec"} err="failed to get container status \"4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec\": rpc error: code = NotFound desc = could not find container \"4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec\": container with ID starting with 4a0c5a62c0b3a9ecd2aa3218b9c93706a015093a24cd21fe2cf1f0ac115e9fec not found: ID does not exist" Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.770195 4875 scope.go:117] "RemoveContainer" 
containerID="ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7" Nov 26 00:31:01 crc kubenswrapper[4875]: E1126 00:31:01.770519 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7\": container with ID starting with ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7 not found: ID does not exist" containerID="ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7" Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.770540 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7"} err="failed to get container status \"ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7\": rpc error: code = NotFound desc = could not find container \"ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7\": container with ID starting with ab04ffc92fab0a59cdd1f20a307f14ada82108d4d8c6dbe785cec8d304ac31c7 not found: ID does not exist" Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.770561 4875 scope.go:117] "RemoveContainer" containerID="2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0" Nov 26 00:31:01 crc kubenswrapper[4875]: E1126 00:31:01.770875 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0\": container with ID starting with 2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0 not found: ID does not exist" containerID="2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0" Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.770894 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0"} err="failed to get container status \"2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0\": rpc error: code = NotFound desc = could not find container \"2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0\": container with ID starting with 2f88304172e2876b1bc7f4ab198914f329751abf3e380070b6bef91988c6fcf0 not found: ID does not exist" Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.770906 4875 scope.go:117] "RemoveContainer" containerID="66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2" Nov 26 00:31:01 crc kubenswrapper[4875]: E1126 00:31:01.771133 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2\": container with ID starting with 66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2 not found: ID does not exist" containerID="66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2" Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.771169 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2"} err="failed to get container status \"66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2\": rpc error: code = NotFound desc = could not find container \"66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2\": container with ID starting with 
66023f10903e6416f052f68315e06f2326e5d0394d33c942cf380102bbbac8f2 not found: ID does not exist" Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.771187 4875 scope.go:117] "RemoveContainer" containerID="f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654" Nov 26 00:31:01 crc kubenswrapper[4875]: E1126 00:31:01.771466 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\": container with ID starting with f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654 not found: ID does not exist" containerID="f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654" Nov 26 00:31:01 crc kubenswrapper[4875]: I1126 00:31:01.771496 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654"} err="failed to get container status \"f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\": rpc error: code = NotFound desc = could not find container \"f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654\": container with ID starting with f3b56fd972f9b8309513aa1d19ec9e7eb68e2a9544591c10563aa96ccf86e654 not found: ID does not exist" Nov 26 00:31:02 crc kubenswrapper[4875]: E1126 00:31:02.979898 4875 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.238:6443: connect: connection refused" Nov 26 00:31:02 crc kubenswrapper[4875]: E1126 00:31:02.980355 4875 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.238:6443: connect: connection refused" Nov 26 00:31:02 crc kubenswrapper[4875]: E1126 00:31:02.980699 4875 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.238:6443: connect: connection refused" Nov 26 00:31:02 crc kubenswrapper[4875]: E1126 00:31:02.981007 4875 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.238:6443: connect: connection refused" Nov 26 00:31:02 crc kubenswrapper[4875]: E1126 00:31:02.981251 4875 controller.go:195] "Failed to update lease" err="Put \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.238:6443: connect: connection refused" Nov 26 00:31:02 crc kubenswrapper[4875]: I1126 00:31:02.981283 4875 controller.go:115] "failed to update lease using latest lease, fallback to ensure lease" err="failed 5 attempts to update lease" Nov 26 00:31:02 crc kubenswrapper[4875]: E1126 00:31:02.981596 4875 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.238:6443: connect: connection refused" interval="200ms" Nov 26 00:31:03 crc kubenswrapper[4875]: E1126 00:31:03.182169 4875 controller.go:145] "Failed to ensure lease exists, will retry" err="Get 
\"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.238:6443: connect: connection refused" interval="400ms" Nov 26 00:31:03 crc kubenswrapper[4875]: I1126 00:31:03.578512 4875 status_manager.go:851] "Failed to get status for pod" podUID="f4b27818a5e8e43d0dc095d08835c792" pod="openshift-kube-apiserver/kube-apiserver-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.129.56.238:6443: connect: connection refused" Nov 26 00:31:03 crc kubenswrapper[4875]: I1126 00:31:03.579171 4875 status_manager.go:851] "Failed to get status for pod" podUID="8703e7d9-1478-4664-90fa-0980c97ccbec" pod="openshift-marketplace/certified-operators-546g6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-546g6\": dial tcp 38.129.56.238:6443: connect: connection refused" Nov 26 00:31:03 crc kubenswrapper[4875]: I1126 00:31:03.579776 4875 status_manager.go:851] "Failed to get status for pod" podUID="4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.238:6443: connect: connection refused" Nov 26 00:31:03 crc kubenswrapper[4875]: I1126 00:31:03.580244 4875 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.238:6443: connect: connection refused" Nov 26 00:31:03 crc kubenswrapper[4875]: I1126 00:31:03.580580 4875 status_manager.go:851] "Failed to get status for pod" podUID="8703e7d9-1478-4664-90fa-0980c97ccbec" pod="openshift-marketplace/certified-operators-546g6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-546g6\": dial tcp 38.129.56.238:6443: connect: connection refused" Nov 26 00:31:03 crc kubenswrapper[4875]: E1126 00:31:03.581897 4875 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/events\": dial tcp 38.129.56.238:6443: connect: connection refused" event="&Event{ObjectMeta:{certified-operators-546g6.187b671e3aa5aa9a openshift-marketplace 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-marketplace,Name:certified-operators-546g6,UID:8703e7d9-1478-4664-90fa-0980c97ccbec,APIVersion:v1,ResourceVersion:35828,FieldPath:spec.initContainers{extract-content},},Reason:Pulling,Message:Pulling image \"registry.redhat.io/redhat/certified-operator-index:v4.18\",Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-26 00:30:58.576296602 +0000 UTC m=+1431.766272297,LastTimestamp:2025-11-26 00:30:58.576296602 +0000 UTC m=+1431.766272297,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 26 00:31:03 crc kubenswrapper[4875]: E1126 00:31:03.583207 4875 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.238:6443: 
connect: connection refused" interval="800ms" Nov 26 00:31:04 crc kubenswrapper[4875]: E1126 00:31:04.383882 4875 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.238:6443: connect: connection refused" interval="1.6s" Nov 26 00:31:05 crc kubenswrapper[4875]: E1126 00:31:05.985152 4875 controller.go:145] "Failed to ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.238:6443: connect: connection refused" interval="3.2s" Nov 26 00:31:07 crc kubenswrapper[4875]: I1126 00:31:07.094070 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/certified-operators-546g6" Nov 26 00:31:07 crc kubenswrapper[4875]: I1126 00:31:07.094790 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/certified-operators-546g6" Nov 26 00:31:07 crc kubenswrapper[4875]: I1126 00:31:07.134716 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/certified-operators-546g6" Nov 26 00:31:07 crc kubenswrapper[4875]: I1126 00:31:07.135161 4875 status_manager.go:851] "Failed to get status for pod" podUID="8703e7d9-1478-4664-90fa-0980c97ccbec" pod="openshift-marketplace/certified-operators-546g6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-546g6\": dial tcp 38.129.56.238:6443: connect: connection refused" Nov 26 00:31:07 crc kubenswrapper[4875]: I1126 00:31:07.135705 4875 status_manager.go:851] "Failed to get status for pod" podUID="4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.238:6443: connect: connection refused" Nov 26 00:31:07 crc kubenswrapper[4875]: I1126 00:31:07.135960 4875 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.238:6443: connect: connection refused" Nov 26 00:31:07 crc kubenswrapper[4875]: I1126 00:31:07.512143 4875 patch_prober.go:28] interesting pod/machine-config-daemon-9mztb container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 00:31:07 crc kubenswrapper[4875]: I1126 00:31:07.512542 4875 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 00:31:07 crc kubenswrapper[4875]: I1126 00:31:07.532018 4875 status_manager.go:851] "Failed to get status for pod" podUID="8703e7d9-1478-4664-90fa-0980c97ccbec" pod="openshift-marketplace/certified-operators-546g6" err="Get 
\"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-546g6\": dial tcp 38.129.56.238:6443: connect: connection refused" Nov 26 00:31:07 crc kubenswrapper[4875]: I1126 00:31:07.532592 4875 status_manager.go:851] "Failed to get status for pod" podUID="4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.238:6443: connect: connection refused" Nov 26 00:31:07 crc kubenswrapper[4875]: I1126 00:31:07.532860 4875 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.238:6443: connect: connection refused" Nov 26 00:31:07 crc kubenswrapper[4875]: I1126 00:31:07.730889 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/certified-operators-546g6" Nov 26 00:31:07 crc kubenswrapper[4875]: I1126 00:31:07.731619 4875 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.238:6443: connect: connection refused" Nov 26 00:31:07 crc kubenswrapper[4875]: I1126 00:31:07.732236 4875 status_manager.go:851] "Failed to get status for pod" podUID="8703e7d9-1478-4664-90fa-0980c97ccbec" pod="openshift-marketplace/certified-operators-546g6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-546g6\": dial tcp 38.129.56.238:6443: connect: connection refused" Nov 26 00:31:07 crc kubenswrapper[4875]: I1126 00:31:07.732524 4875 status_manager.go:851] "Failed to get status for pod" podUID="4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.238:6443: connect: connection refused" Nov 26 00:31:08 crc kubenswrapper[4875]: E1126 00:31:08.077633 4875 event.go:368] "Unable to write event (may retry after sleeping)" err="Post \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/events\": dial tcp 38.129.56.238:6443: connect: connection refused" event="&Event{ObjectMeta:{certified-operators-546g6.187b671e3aa5aa9a openshift-marketplace 0 0001-01-01 00:00:00 +0000 UTC map[] map[] [] [] []},InvolvedObject:ObjectReference{Kind:Pod,Namespace:openshift-marketplace,Name:certified-operators-546g6,UID:8703e7d9-1478-4664-90fa-0980c97ccbec,APIVersion:v1,ResourceVersion:35828,FieldPath:spec.initContainers{extract-content},},Reason:Pulling,Message:Pulling image \"registry.redhat.io/redhat/certified-operator-index:v4.18\",Source:EventSource{Component:kubelet,Host:crc,},FirstTimestamp:2025-11-26 00:30:58.576296602 +0000 UTC m=+1431.766272297,LastTimestamp:2025-11-26 00:30:58.576296602 +0000 UTC m=+1431.766272297,Count:1,Type:Normal,EventTime:0001-01-01 00:00:00 +0000 UTC,Series:nil,Action:,Related:nil,ReportingController:kubelet,ReportingInstance:crc,}" Nov 26 00:31:09 crc kubenswrapper[4875]: E1126 00:31:09.186074 4875 controller.go:145] "Failed to 
ensure lease exists, will retry" err="Get \"https://api-int.crc.testing:6443/apis/coordination.k8s.io/v1/namespaces/kube-node-lease/leases/crc?timeout=10s\": dial tcp 38.129.56.238:6443: connect: connection refused" interval="6.4s" Nov 26 00:31:09 crc kubenswrapper[4875]: I1126 00:31:09.707283 4875 generic.go:334] "Generic (PLEG): container finished" podID="5fdc5235-fab2-4d96-8526-1ca2270a92ac" containerID="80a4540d847863a704d9f59865aec1dc26647043be8452f597439ce69a7378be" exitCode=0 Nov 26 00:31:09 crc kubenswrapper[4875]: I1126 00:31:09.707577 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/elastic-operator-bf7b77755-xjqrk" event={"ID":"5fdc5235-fab2-4d96-8526-1ca2270a92ac","Type":"ContainerDied","Data":"80a4540d847863a704d9f59865aec1dc26647043be8452f597439ce69a7378be"} Nov 26 00:31:09 crc kubenswrapper[4875]: I1126 00:31:09.708233 4875 scope.go:117] "RemoveContainer" containerID="80a4540d847863a704d9f59865aec1dc26647043be8452f597439ce69a7378be" Nov 26 00:31:09 crc kubenswrapper[4875]: I1126 00:31:09.708668 4875 status_manager.go:851] "Failed to get status for pod" podUID="8703e7d9-1478-4664-90fa-0980c97ccbec" pod="openshift-marketplace/certified-operators-546g6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-546g6\": dial tcp 38.129.56.238:6443: connect: connection refused" Nov 26 00:31:09 crc kubenswrapper[4875]: I1126 00:31:09.708991 4875 status_manager.go:851] "Failed to get status for pod" podUID="4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.238:6443: connect: connection refused" Nov 26 00:31:09 crc kubenswrapper[4875]: I1126 00:31:09.709326 4875 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.238:6443: connect: connection refused" Nov 26 00:31:09 crc kubenswrapper[4875]: I1126 00:31:09.709661 4875 status_manager.go:851] "Failed to get status for pod" podUID="5fdc5235-fab2-4d96-8526-1ca2270a92ac" pod="service-telemetry/elastic-operator-bf7b77755-xjqrk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/service-telemetry/pods/elastic-operator-bf7b77755-xjqrk\": dial tcp 38.129.56.238:6443: connect: connection refused" Nov 26 00:31:10 crc kubenswrapper[4875]: I1126 00:31:10.716776 4875 generic.go:334] "Generic (PLEG): container finished" podID="5fdc5235-fab2-4d96-8526-1ca2270a92ac" containerID="64045b1bc3ef4cb2909abed53ac9d140f78631609d7adcc0fd2691dae72dd7cf" exitCode=0 Nov 26 00:31:10 crc kubenswrapper[4875]: I1126 00:31:10.716834 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/elastic-operator-bf7b77755-xjqrk" event={"ID":"5fdc5235-fab2-4d96-8526-1ca2270a92ac","Type":"ContainerDied","Data":"64045b1bc3ef4cb2909abed53ac9d140f78631609d7adcc0fd2691dae72dd7cf"} Nov 26 00:31:10 crc kubenswrapper[4875]: I1126 00:31:10.717177 4875 scope.go:117] "RemoveContainer" containerID="80a4540d847863a704d9f59865aec1dc26647043be8452f597439ce69a7378be" Nov 26 00:31:10 crc kubenswrapper[4875]: I1126 00:31:10.718147 4875 status_manager.go:851] "Failed to get status for pod" podUID="8703e7d9-1478-4664-90fa-0980c97ccbec" 
pod="openshift-marketplace/certified-operators-546g6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-546g6\": dial tcp 38.129.56.238:6443: connect: connection refused" Nov 26 00:31:10 crc kubenswrapper[4875]: I1126 00:31:10.718441 4875 scope.go:117] "RemoveContainer" containerID="64045b1bc3ef4cb2909abed53ac9d140f78631609d7adcc0fd2691dae72dd7cf" Nov 26 00:31:10 crc kubenswrapper[4875]: I1126 00:31:10.718864 4875 status_manager.go:851] "Failed to get status for pod" podUID="4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.238:6443: connect: connection refused" Nov 26 00:31:10 crc kubenswrapper[4875]: E1126 00:31:10.719200 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 10s restarting failed container=manager pod=elastic-operator-bf7b77755-xjqrk_service-telemetry(5fdc5235-fab2-4d96-8526-1ca2270a92ac)\"" pod="service-telemetry/elastic-operator-bf7b77755-xjqrk" podUID="5fdc5235-fab2-4d96-8526-1ca2270a92ac" Nov 26 00:31:10 crc kubenswrapper[4875]: I1126 00:31:10.719209 4875 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.238:6443: connect: connection refused" Nov 26 00:31:10 crc kubenswrapper[4875]: I1126 00:31:10.719799 4875 status_manager.go:851] "Failed to get status for pod" podUID="5fdc5235-fab2-4d96-8526-1ca2270a92ac" pod="service-telemetry/elastic-operator-bf7b77755-xjqrk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/service-telemetry/pods/elastic-operator-bf7b77755-xjqrk\": dial tcp 38.129.56.238:6443: connect: connection refused" Nov 26 00:31:11 crc kubenswrapper[4875]: I1126 00:31:11.528172 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 00:31:11 crc kubenswrapper[4875]: I1126 00:31:11.529244 4875 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.238:6443: connect: connection refused" Nov 26 00:31:11 crc kubenswrapper[4875]: I1126 00:31:11.529499 4875 status_manager.go:851] "Failed to get status for pod" podUID="5fdc5235-fab2-4d96-8526-1ca2270a92ac" pod="service-telemetry/elastic-operator-bf7b77755-xjqrk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/service-telemetry/pods/elastic-operator-bf7b77755-xjqrk\": dial tcp 38.129.56.238:6443: connect: connection refused" Nov 26 00:31:11 crc kubenswrapper[4875]: I1126 00:31:11.529864 4875 status_manager.go:851] "Failed to get status for pod" podUID="8703e7d9-1478-4664-90fa-0980c97ccbec" pod="openshift-marketplace/certified-operators-546g6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-546g6\": dial tcp 38.129.56.238:6443: connect: connection refused" Nov 26 00:31:11 crc kubenswrapper[4875]: I1126 00:31:11.530318 4875 status_manager.go:851] "Failed to get status for pod" podUID="4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.238:6443: connect: connection refused" Nov 26 00:31:11 crc kubenswrapper[4875]: I1126 00:31:11.544672 4875 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="c6c8a175-7cc7-4e45-8e37-d431994e5526" Nov 26 00:31:11 crc kubenswrapper[4875]: I1126 00:31:11.544711 4875 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="c6c8a175-7cc7-4e45-8e37-d431994e5526" Nov 26 00:31:11 crc kubenswrapper[4875]: E1126 00:31:11.545199 4875 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.129.56.238:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 00:31:11 crc kubenswrapper[4875]: I1126 00:31:11.545808 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 00:31:11 crc kubenswrapper[4875]: W1126 00:31:11.567552 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod71bb4a3aecc4ba5b26c4b7318770ce13.slice/crio-5f0269fa85f67d6cf48c00176bbd5eb384ae66a1ae3ccbc8b4e77b3f7e9b1395 WatchSource:0}: Error finding container 5f0269fa85f67d6cf48c00176bbd5eb384ae66a1ae3ccbc8b4e77b3f7e9b1395: Status 404 returned error can't find the container with id 5f0269fa85f67d6cf48c00176bbd5eb384ae66a1ae3ccbc8b4e77b3f7e9b1395 Nov 26 00:31:11 crc kubenswrapper[4875]: I1126 00:31:11.724997 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"5f0269fa85f67d6cf48c00176bbd5eb384ae66a1ae3ccbc8b4e77b3f7e9b1395"} Nov 26 00:31:12 crc kubenswrapper[4875]: I1126 00:31:12.734636 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 26 00:31:12 crc kubenswrapper[4875]: I1126 00:31:12.735369 4875 generic.go:334] "Generic (PLEG): container finished" podID="f614b9022728cf315e60c057852e563e" containerID="0dc301396f467d1c7d0af549fa7bf7daf1542a2aa87bf3ece820ce8e21a933d5" exitCode=1 Nov 26 00:31:12 crc kubenswrapper[4875]: I1126 00:31:12.735451 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerDied","Data":"0dc301396f467d1c7d0af549fa7bf7daf1542a2aa87bf3ece820ce8e21a933d5"} Nov 26 00:31:12 crc kubenswrapper[4875]: I1126 00:31:12.736049 4875 scope.go:117] "RemoveContainer" containerID="0dc301396f467d1c7d0af549fa7bf7daf1542a2aa87bf3ece820ce8e21a933d5" Nov 26 00:31:12 crc kubenswrapper[4875]: I1126 00:31:12.736509 4875 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.129.56.238:6443: connect: connection refused" Nov 26 00:31:12 crc kubenswrapper[4875]: I1126 00:31:12.737177 4875 status_manager.go:851] "Failed to get status for pod" podUID="4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.238:6443: connect: connection refused" Nov 26 00:31:12 crc kubenswrapper[4875]: I1126 00:31:12.737524 4875 generic.go:334] "Generic (PLEG): container finished" podID="71bb4a3aecc4ba5b26c4b7318770ce13" containerID="7b4ddb27df5f003e593db0d1ba97a1bef29b014d848419d298a59866febb9fe5" exitCode=0 Nov 26 00:31:12 crc kubenswrapper[4875]: I1126 00:31:12.737541 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerDied","Data":"7b4ddb27df5f003e593db0d1ba97a1bef29b014d848419d298a59866febb9fe5"} Nov 26 00:31:12 crc kubenswrapper[4875]: I1126 00:31:12.737857 4875 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="c6c8a175-7cc7-4e45-8e37-d431994e5526" Nov 26 00:31:12 crc 
kubenswrapper[4875]: I1126 00:31:12.737931 4875 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="c6c8a175-7cc7-4e45-8e37-d431994e5526" Nov 26 00:31:12 crc kubenswrapper[4875]: I1126 00:31:12.737889 4875 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.238:6443: connect: connection refused" Nov 26 00:31:12 crc kubenswrapper[4875]: I1126 00:31:12.738269 4875 status_manager.go:851] "Failed to get status for pod" podUID="5fdc5235-fab2-4d96-8526-1ca2270a92ac" pod="service-telemetry/elastic-operator-bf7b77755-xjqrk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/service-telemetry/pods/elastic-operator-bf7b77755-xjqrk\": dial tcp 38.129.56.238:6443: connect: connection refused" Nov 26 00:31:12 crc kubenswrapper[4875]: E1126 00:31:12.738392 4875 mirror_client.go:138] "Failed deleting a mirror pod" err="Delete \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-crc\": dial tcp 38.129.56.238:6443: connect: connection refused" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 00:31:12 crc kubenswrapper[4875]: I1126 00:31:12.738499 4875 status_manager.go:851] "Failed to get status for pod" podUID="8703e7d9-1478-4664-90fa-0980c97ccbec" pod="openshift-marketplace/certified-operators-546g6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-546g6\": dial tcp 38.129.56.238:6443: connect: connection refused" Nov 26 00:31:12 crc kubenswrapper[4875]: I1126 00:31:12.739163 4875 status_manager.go:851] "Failed to get status for pod" podUID="4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c" pod="openshift-kube-apiserver/installer-9-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/installer-9-crc\": dial tcp 38.129.56.238:6443: connect: connection refused" Nov 26 00:31:12 crc kubenswrapper[4875]: I1126 00:31:12.739609 4875 status_manager.go:851] "Failed to get status for pod" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-apiserver/pods/kube-apiserver-startup-monitor-crc\": dial tcp 38.129.56.238:6443: connect: connection refused" Nov 26 00:31:12 crc kubenswrapper[4875]: I1126 00:31:12.739878 4875 status_manager.go:851] "Failed to get status for pod" podUID="5fdc5235-fab2-4d96-8526-1ca2270a92ac" pod="service-telemetry/elastic-operator-bf7b77755-xjqrk" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/service-telemetry/pods/elastic-operator-bf7b77755-xjqrk\": dial tcp 38.129.56.238:6443: connect: connection refused" Nov 26 00:31:12 crc kubenswrapper[4875]: I1126 00:31:12.740203 4875 status_manager.go:851] "Failed to get status for pod" podUID="8703e7d9-1478-4664-90fa-0980c97ccbec" pod="openshift-marketplace/certified-operators-546g6" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-marketplace/pods/certified-operators-546g6\": dial tcp 38.129.56.238:6443: connect: connection refused" Nov 26 00:31:12 crc kubenswrapper[4875]: I1126 00:31:12.740473 4875 status_manager.go:851] "Failed to get status for pod" podUID="f614b9022728cf315e60c057852e563e" 
pod="openshift-kube-controller-manager/kube-controller-manager-crc" err="Get \"https://api-int.crc.testing:6443/api/v1/namespaces/openshift-kube-controller-manager/pods/kube-controller-manager-crc\": dial tcp 38.129.56.238:6443: connect: connection refused" Nov 26 00:31:13 crc kubenswrapper[4875]: I1126 00:31:13.748040 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-controller-manager_kube-controller-manager-crc_f614b9022728cf315e60c057852e563e/kube-controller-manager/0.log" Nov 26 00:31:13 crc kubenswrapper[4875]: I1126 00:31:13.748452 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-controller-manager/kube-controller-manager-crc" event={"ID":"f614b9022728cf315e60c057852e563e","Type":"ContainerStarted","Data":"f864ce6fed55e0fe32aff2375293e772a8027a49aabc0c3eeb87ccdeafe50a5e"} Nov 26 00:31:13 crc kubenswrapper[4875]: I1126 00:31:13.752561 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"928ca3cbe09c471d4f6c3e8f4ba0158b6b0d7aef7c04feeed41000cef750d01d"} Nov 26 00:31:13 crc kubenswrapper[4875]: I1126 00:31:13.752607 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"61bf6f7e749fd2d57359d88dedbc93afa840360377df6c444d46f1ad5517c44d"} Nov 26 00:31:13 crc kubenswrapper[4875]: I1126 00:31:13.752617 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"c949753bf9b4dcf1972ff218eafae13a85042e60c74b9d971269ee5fe9f5c8e1"} Nov 26 00:31:13 crc kubenswrapper[4875]: I1126 00:31:13.752636 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"001a0f09dc05e5a4935625c13d26b8864729f6bb265f2a9800233c456e9cafc0"} Nov 26 00:31:13 crc kubenswrapper[4875]: I1126 00:31:13.783749 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 00:31:14 crc kubenswrapper[4875]: I1126 00:31:14.761706 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-kube-apiserver/kube-apiserver-crc" event={"ID":"71bb4a3aecc4ba5b26c4b7318770ce13","Type":"ContainerStarted","Data":"97b254c9250e23764f01064c78aa1365daff9843f596ff6e973e21864d6d053c"} Nov 26 00:31:14 crc kubenswrapper[4875]: I1126 00:31:14.761887 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 00:31:14 crc kubenswrapper[4875]: I1126 00:31:14.762002 4875 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="c6c8a175-7cc7-4e45-8e37-d431994e5526" Nov 26 00:31:14 crc kubenswrapper[4875]: I1126 00:31:14.762028 4875 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="c6c8a175-7cc7-4e45-8e37-d431994e5526" Nov 26 00:31:16 crc kubenswrapper[4875]: I1126 00:31:16.546203 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 00:31:16 crc kubenswrapper[4875]: I1126 00:31:16.546579 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" 
pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 00:31:16 crc kubenswrapper[4875]: I1126 00:31:16.551912 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 00:31:17 crc kubenswrapper[4875]: I1126 00:31:17.116440 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 00:31:17 crc kubenswrapper[4875]: I1126 00:31:17.120357 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 00:31:19 crc kubenswrapper[4875]: I1126 00:31:19.769251 4875 kubelet.go:1914] "Deleted mirror pod because it is outdated" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 00:31:19 crc kubenswrapper[4875]: I1126 00:31:19.790007 4875 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="c6c8a175-7cc7-4e45-8e37-d431994e5526" Nov 26 00:31:19 crc kubenswrapper[4875]: I1126 00:31:19.790238 4875 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="c6c8a175-7cc7-4e45-8e37-d431994e5526" Nov 26 00:31:19 crc kubenswrapper[4875]: I1126 00:31:19.794255 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 00:31:19 crc kubenswrapper[4875]: I1126 00:31:19.796248 4875 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="1a7b268b-11a6-42fe-aa47-b03af2b5e02d" Nov 26 00:31:20 crc kubenswrapper[4875]: I1126 00:31:20.794906 4875 kubelet.go:1909] "Trying to delete pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="c6c8a175-7cc7-4e45-8e37-d431994e5526" Nov 26 00:31:20 crc kubenswrapper[4875]: I1126 00:31:20.794935 4875 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-crc" podUID="c6c8a175-7cc7-4e45-8e37-d431994e5526" Nov 26 00:31:23 crc kubenswrapper[4875]: I1126 00:31:23.528707 4875 scope.go:117] "RemoveContainer" containerID="64045b1bc3ef4cb2909abed53ac9d140f78631609d7adcc0fd2691dae72dd7cf" Nov 26 00:31:23 crc kubenswrapper[4875]: I1126 00:31:23.787119 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-controller-manager/kube-controller-manager-crc" Nov 26 00:31:23 crc kubenswrapper[4875]: I1126 00:31:23.817708 4875 generic.go:334] "Generic (PLEG): container finished" podID="5fdc5235-fab2-4d96-8526-1ca2270a92ac" containerID="37ee2fb3c4f9587c099434441f8a051c73d91c5cbcfda08bce7fe55cba41b95d" exitCode=0 Nov 26 00:31:23 crc kubenswrapper[4875]: I1126 00:31:23.817754 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/elastic-operator-bf7b77755-xjqrk" event={"ID":"5fdc5235-fab2-4d96-8526-1ca2270a92ac","Type":"ContainerDied","Data":"37ee2fb3c4f9587c099434441f8a051c73d91c5cbcfda08bce7fe55cba41b95d"} Nov 26 00:31:23 crc kubenswrapper[4875]: I1126 00:31:23.817805 4875 scope.go:117] "RemoveContainer" containerID="64045b1bc3ef4cb2909abed53ac9d140f78631609d7adcc0fd2691dae72dd7cf" Nov 26 00:31:23 crc kubenswrapper[4875]: I1126 00:31:23.818244 4875 scope.go:117] "RemoveContainer" containerID="37ee2fb3c4f9587c099434441f8a051c73d91c5cbcfda08bce7fe55cba41b95d" Nov 26 00:31:23 crc kubenswrapper[4875]: E1126 00:31:23.818451 4875 pod_workers.go:1301] "Error 
syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=elastic-operator-bf7b77755-xjqrk_service-telemetry(5fdc5235-fab2-4d96-8526-1ca2270a92ac)\"" pod="service-telemetry/elastic-operator-bf7b77755-xjqrk" podUID="5fdc5235-fab2-4d96-8526-1ca2270a92ac" Nov 26 00:31:27 crc kubenswrapper[4875]: I1126 00:31:27.558918 4875 status_manager.go:861] "Pod was deleted and then recreated, skipping status update" pod="openshift-kube-apiserver/kube-apiserver-crc" oldPodUID="71bb4a3aecc4ba5b26c4b7318770ce13" podUID="1a7b268b-11a6-42fe-aa47-b03af2b5e02d" Nov 26 00:31:29 crc kubenswrapper[4875]: I1126 00:31:29.065016 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"openshift-apiserver-sa-dockercfg-djjff" Nov 26 00:31:29 crc kubenswrapper[4875]: I1126 00:31:29.187966 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"kube-root-ca.crt" Nov 26 00:31:29 crc kubenswrapper[4875]: I1126 00:31:29.378525 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-session" Nov 26 00:31:29 crc kubenswrapper[4875]: I1126 00:31:29.519019 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"openshift-service-ca.crt" Nov 26 00:31:30 crc kubenswrapper[4875]: I1126 00:31:30.419383 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"openshift-service-ca.crt" Nov 26 00:31:30 crc kubenswrapper[4875]: I1126 00:31:30.475211 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"default-dockercfg-2q5b6" Nov 26 00:31:30 crc kubenswrapper[4875]: I1126 00:31:30.606391 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-dockercfg-jwfmh" Nov 26 00:31:30 crc kubenswrapper[4875]: I1126 00:31:30.625156 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"kube-root-ca.crt" Nov 26 00:31:30 crc kubenswrapper[4875]: I1126 00:31:30.947894 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"kube-root-ca.crt" Nov 26 00:31:30 crc kubenswrapper[4875]: I1126 00:31:30.979574 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-rbac-proxy" Nov 26 00:31:31 crc kubenswrapper[4875]: I1126 00:31:31.108978 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-serving-cert" Nov 26 00:31:31 crc kubenswrapper[4875]: I1126 00:31:31.198297 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-tls" Nov 26 00:31:31 crc kubenswrapper[4875]: I1126 00:31:31.246697 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-service-ca.crt" Nov 26 00:31:31 crc kubenswrapper[4875]: I1126 00:31:31.585313 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"config" Nov 26 00:31:31 crc kubenswrapper[4875]: I1126 00:31:31.644562 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"ovnkube-identity-cm" Nov 26 00:31:31 crc kubenswrapper[4875]: I1126 00:31:31.683884 4875 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-ingress"/"kube-root-ca.crt" Nov 26 00:31:31 crc kubenswrapper[4875]: I1126 00:31:31.712760 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-root-ca.crt" Nov 26 00:31:31 crc kubenswrapper[4875]: I1126 00:31:31.841092 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"openshift-service-ca.crt" Nov 26 00:31:31 crc kubenswrapper[4875]: I1126 00:31:31.868027 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-service-ca-bundle" Nov 26 00:31:31 crc kubenswrapper[4875]: I1126 00:31:31.917966 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-ocp-branding-template" Nov 26 00:31:31 crc kubenswrapper[4875]: I1126 00:31:31.929705 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"console-operator-config" Nov 26 00:31:32 crc kubenswrapper[4875]: I1126 00:31:32.048766 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"kube-root-ca.crt" Nov 26 00:31:32 crc kubenswrapper[4875]: I1126 00:31:32.072858 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"oauth-openshift-dockercfg-znhcc" Nov 26 00:31:32 crc kubenswrapper[4875]: I1126 00:31:32.104487 4875 reflector.go:368] Caches populated for *v1.Secret from object-"service-telemetry"/"elasticsearch-es-xpack-file-realm" Nov 26 00:31:32 crc kubenswrapper[4875]: I1126 00:31:32.139430 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"openshift-service-ca.crt" Nov 26 00:31:32 crc kubenswrapper[4875]: I1126 00:31:32.140381 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-serving-cert" Nov 26 00:31:32 crc kubenswrapper[4875]: I1126 00:31:32.141581 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"openshift-service-ca.crt" Nov 26 00:31:32 crc kubenswrapper[4875]: I1126 00:31:32.228486 4875 reflector.go:368] Caches populated for *v1.Secret from object-"service-telemetry"/"elasticsearch-es-default-es-config" Nov 26 00:31:32 crc kubenswrapper[4875]: I1126 00:31:32.356181 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"kube-root-ca.crt" Nov 26 00:31:32 crc kubenswrapper[4875]: I1126 00:31:32.605053 4875 reflector.go:368] Caches populated for *v1.CSIDriver from k8s.io/client-go/informers/factory.go:160 Nov 26 00:31:32 crc kubenswrapper[4875]: I1126 00:31:32.616130 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"openshift-service-ca.crt" Nov 26 00:31:32 crc kubenswrapper[4875]: I1126 00:31:32.646133 4875 reflector.go:368] Caches populated for *v1.Secret from object-"service-telemetry"/"elasticsearch-es-http-certs-internal" Nov 26 00:31:32 crc kubenswrapper[4875]: I1126 00:31:32.705937 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-console"/"networking-console-plugin" Nov 26 00:31:32 crc kubenswrapper[4875]: I1126 00:31:32.847945 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"openshift-service-ca.crt" Nov 26 00:31:32 crc kubenswrapper[4875]: I1126 00:31:32.847989 4875 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-marketplace"/"marketplace-operator-metrics" Nov 26 00:31:32 crc kubenswrapper[4875]: I1126 00:31:32.925060 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-config" Nov 26 00:31:32 crc kubenswrapper[4875]: I1126 00:31:32.954665 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-node-metrics-cert" Nov 26 00:31:33 crc kubenswrapper[4875]: I1126 00:31:33.082004 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ac-dockercfg-9lkdf" Nov 26 00:31:33 crc kubenswrapper[4875]: I1126 00:31:33.093825 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"kube-root-ca.crt" Nov 26 00:31:33 crc kubenswrapper[4875]: I1126 00:31:33.112935 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"kube-root-ca.crt" Nov 26 00:31:33 crc kubenswrapper[4875]: I1126 00:31:33.143748 4875 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager-operator"/"cert-manager-operator-controller-manager-dockercfg-sdm89" Nov 26 00:31:33 crc kubenswrapper[4875]: I1126 00:31:33.144987 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-idp-0-file-data" Nov 26 00:31:33 crc kubenswrapper[4875]: I1126 00:31:33.185516 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"openshift-service-ca.crt" Nov 26 00:31:33 crc kubenswrapper[4875]: I1126 00:31:33.231795 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"openshift-service-ca.crt" Nov 26 00:31:33 crc kubenswrapper[4875]: I1126 00:31:33.343858 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"service-ca" Nov 26 00:31:33 crc kubenswrapper[4875]: I1126 00:31:33.395342 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-config" Nov 26 00:31:33 crc kubenswrapper[4875]: I1126 00:31:33.429744 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-daemon-dockercfg-r5tcq" Nov 26 00:31:33 crc kubenswrapper[4875]: I1126 00:31:33.442933 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"config" Nov 26 00:31:33 crc kubenswrapper[4875]: I1126 00:31:33.522228 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-serving-cert" Nov 26 00:31:33 crc kubenswrapper[4875]: I1126 00:31:33.564617 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"serving-cert" Nov 26 00:31:33 crc kubenswrapper[4875]: I1126 00:31:33.745382 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"openshift-service-ca.crt" Nov 26 00:31:33 crc kubenswrapper[4875]: I1126 00:31:33.787232 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"trusted-ca" Nov 26 00:31:33 crc kubenswrapper[4875]: I1126 00:31:33.805779 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"redhat-operators-dockercfg-ct8rh" Nov 26 00:31:33 crc kubenswrapper[4875]: I1126 00:31:33.811007 4875 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-service-ca-operator"/"service-ca-operator-config" Nov 26 00:31:33 crc kubenswrapper[4875]: I1126 00:31:33.976726 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"kube-root-ca.crt" Nov 26 00:31:34 crc kubenswrapper[4875]: I1126 00:31:34.071256 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"kube-root-ca.crt" Nov 26 00:31:34 crc kubenswrapper[4875]: I1126 00:31:34.072894 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"encryption-config-1" Nov 26 00:31:34 crc kubenswrapper[4875]: I1126 00:31:34.186069 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-ca-bundle" Nov 26 00:31:34 crc kubenswrapper[4875]: I1126 00:31:34.195448 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"cni-copy-resources" Nov 26 00:31:34 crc kubenswrapper[4875]: I1126 00:31:34.218169 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"service-telemetry"/"prometheus-webhook-snmp-2-global-ca" Nov 26 00:31:34 crc kubenswrapper[4875]: I1126 00:31:34.267663 4875 reflector.go:368] Caches populated for *v1.Secret from object-"hostpath-provisioner"/"csi-hostpath-provisioner-sa-dockercfg-qd74k" Nov 26 00:31:34 crc kubenswrapper[4875]: I1126 00:31:34.387880 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-service-ca.crt" Nov 26 00:31:34 crc kubenswrapper[4875]: I1126 00:31:34.427028 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"kube-root-ca.crt" Nov 26 00:31:34 crc kubenswrapper[4875]: I1126 00:31:34.543234 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"openshift-service-ca.crt" Nov 26 00:31:34 crc kubenswrapper[4875]: I1126 00:31:34.689458 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"perses-operator-dockercfg-hzg29" Nov 26 00:31:34 crc kubenswrapper[4875]: I1126 00:31:34.690978 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"kube-root-ca.crt" Nov 26 00:31:34 crc kubenswrapper[4875]: I1126 00:31:34.691525 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"serving-cert" Nov 26 00:31:34 crc kubenswrapper[4875]: I1126 00:31:34.824495 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-image-registry"/"image-registry-certificates" Nov 26 00:31:34 crc kubenswrapper[4875]: I1126 00:31:34.830720 4875 reflector.go:368] Caches populated for *v1.Secret from object-"service-telemetry"/"builder-dockercfg-5d5px" Nov 26 00:31:34 crc kubenswrapper[4875]: I1126 00:31:34.950123 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"node-resolver-dockercfg-kz9s7" Nov 26 00:31:34 crc kubenswrapper[4875]: I1126 00:31:34.983904 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"client-ca" Nov 26 00:31:35 crc kubenswrapper[4875]: I1126 00:31:35.080514 4875 reflector.go:368] Caches populated for *v1.Secret from object-"service-telemetry"/"elasticsearch-es-internal-users" Nov 26 00:31:35 crc kubenswrapper[4875]: I1126 00:31:35.082924 4875 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-ovn-kubernetes"/"ovn-kubernetes-control-plane-dockercfg-gs7dd" Nov 26 00:31:35 crc kubenswrapper[4875]: I1126 00:31:35.119108 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"serving-cert" Nov 26 00:31:35 crc kubenswrapper[4875]: I1126 00:31:35.161455 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"kube-rbac-proxy" Nov 26 00:31:35 crc kubenswrapper[4875]: I1126 00:31:35.208609 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"trusted-ca" Nov 26 00:31:35 crc kubenswrapper[4875]: I1126 00:31:35.293700 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"node-ca-dockercfg-4777p" Nov 26 00:31:35 crc kubenswrapper[4875]: I1126 00:31:35.296832 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"machine-approver-config" Nov 26 00:31:35 crc kubenswrapper[4875]: I1126 00:31:35.352390 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"service-ca-bundle" Nov 26 00:31:35 crc kubenswrapper[4875]: I1126 00:31:35.407484 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"kube-root-ca.crt" Nov 26 00:31:35 crc kubenswrapper[4875]: I1126 00:31:35.414762 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"image-import-ca" Nov 26 00:31:35 crc kubenswrapper[4875]: I1126 00:31:35.538922 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-tls" Nov 26 00:31:35 crc kubenswrapper[4875]: I1126 00:31:35.542837 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"kube-root-ca.crt" Nov 26 00:31:35 crc kubenswrapper[4875]: I1126 00:31:35.547250 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"default-dockercfg-chnjx" Nov 26 00:31:35 crc kubenswrapper[4875]: I1126 00:31:35.690045 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serviceaccount-dockercfg-rq7zk" Nov 26 00:31:35 crc kubenswrapper[4875]: I1126 00:31:35.736004 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"olm-operator-serving-cert" Nov 26 00:31:35 crc kubenswrapper[4875]: I1126 00:31:35.796642 4875 reflector.go:368] Caches populated for *v1.Node from k8s.io/client-go/informers/factory.go:160 Nov 26 00:31:35 crc kubenswrapper[4875]: I1126 00:31:35.811900 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"serving-cert" Nov 26 00:31:35 crc kubenswrapper[4875]: I1126 00:31:35.898691 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"audit" Nov 26 00:31:36 crc kubenswrapper[4875]: I1126 00:31:36.025273 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-dockercfg-gkqpw" Nov 26 00:31:36 crc kubenswrapper[4875]: I1126 00:31:36.146945 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-operator-tls" Nov 26 00:31:36 crc kubenswrapper[4875]: I1126 00:31:36.162367 4875 reflector.go:368] Caches populated for *v1.ConfigMap from 
object-"openshift-machine-api"/"kube-root-ca.crt" Nov 26 00:31:36 crc kubenswrapper[4875]: I1126 00:31:36.195041 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"default-dockercfg-2llfx" Nov 26 00:31:36 crc kubenswrapper[4875]: I1126 00:31:36.222552 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"openshift-service-ca.crt" Nov 26 00:31:36 crc kubenswrapper[4875]: I1126 00:31:36.298938 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"kube-root-ca.crt" Nov 26 00:31:36 crc kubenswrapper[4875]: I1126 00:31:36.358164 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mcc-proxy-tls" Nov 26 00:31:36 crc kubenswrapper[4875]: I1126 00:31:36.385042 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-tls" Nov 26 00:31:36 crc kubenswrapper[4875]: I1126 00:31:36.501589 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operators"/"kube-root-ca.crt" Nov 26 00:31:36 crc kubenswrapper[4875]: I1126 00:31:36.576658 4875 reflector.go:368] Caches populated for *v1.Secret from object-"service-telemetry"/"elasticsearch-es-default-es-transport-certs" Nov 26 00:31:36 crc kubenswrapper[4875]: I1126 00:31:36.578357 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-provider-selection" Nov 26 00:31:36 crc kubenswrapper[4875]: I1126 00:31:36.625794 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"kube-root-ca.crt" Nov 26 00:31:36 crc kubenswrapper[4875]: I1126 00:31:36.630071 4875 reflector.go:368] Caches populated for *v1.Secret from object-"service-telemetry"/"elastic-operator-service-cert" Nov 26 00:31:36 crc kubenswrapper[4875]: I1126 00:31:36.706359 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"openshift-service-ca.crt" Nov 26 00:31:36 crc kubenswrapper[4875]: I1126 00:31:36.723575 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-trusted-ca-bundle" Nov 26 00:31:36 crc kubenswrapper[4875]: I1126 00:31:36.784986 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"catalog-operator-serving-cert" Nov 26 00:31:36 crc kubenswrapper[4875]: I1126 00:31:36.843793 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-root-ca.crt" Nov 26 00:31:36 crc kubenswrapper[4875]: I1126 00:31:36.867790 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-operator-dockercfg-98p87" Nov 26 00:31:36 crc kubenswrapper[4875]: I1126 00:31:36.876844 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"openshift-service-ca.crt" Nov 26 00:31:36 crc kubenswrapper[4875]: I1126 00:31:36.884081 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"dns-operator-dockercfg-9mqw5" Nov 26 00:31:36 crc kubenswrapper[4875]: I1126 00:31:36.940092 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-dockercfg-f62pw" Nov 26 00:31:36 crc kubenswrapper[4875]: I1126 00:31:36.948070 4875 reflector.go:368] 
Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"openshift-service-ca.crt" Nov 26 00:31:37 crc kubenswrapper[4875]: I1126 00:31:37.134579 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"service-telemetry"/"prometheus-webhook-snmp-2-ca" Nov 26 00:31:37 crc kubenswrapper[4875]: I1126 00:31:37.149000 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-ancillary-tools-dockercfg-vnmsz" Nov 26 00:31:37 crc kubenswrapper[4875]: I1126 00:31:37.263916 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"kube-root-ca.crt" Nov 26 00:31:37 crc kubenswrapper[4875]: I1126 00:31:37.315615 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-service-ca" Nov 26 00:31:37 crc kubenswrapper[4875]: I1126 00:31:37.329273 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"openshift-service-ca.crt" Nov 26 00:31:37 crc kubenswrapper[4875]: I1126 00:31:37.416341 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-secret" Nov 26 00:31:37 crc kubenswrapper[4875]: I1126 00:31:37.512596 4875 patch_prober.go:28] interesting pod/machine-config-daemon-9mztb container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" start-of-body= Nov 26 00:31:37 crc kubenswrapper[4875]: I1126 00:31:37.512672 4875 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 00:31:37 crc kubenswrapper[4875]: I1126 00:31:37.585167 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-config-operator"/"openshift-service-ca.crt" Nov 26 00:31:37 crc kubenswrapper[4875]: I1126 00:31:37.593629 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager-operator"/"kube-root-ca.crt" Nov 26 00:31:37 crc kubenswrapper[4875]: I1126 00:31:37.640157 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"env-overrides" Nov 26 00:31:37 crc kubenswrapper[4875]: I1126 00:31:37.675577 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-error" Nov 26 00:31:37 crc kubenswrapper[4875]: I1126 00:31:37.709483 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"openshift-service-ca.crt" Nov 26 00:31:37 crc kubenswrapper[4875]: I1126 00:31:37.717573 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-version"/"cluster-version-operator-serving-cert" Nov 26 00:31:37 crc kubenswrapper[4875]: I1126 00:31:37.732355 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"env-overrides" Nov 26 00:31:37 crc kubenswrapper[4875]: I1126 00:31:37.794831 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-certs-default" Nov 26 00:31:37 crc kubenswrapper[4875]: I1126 00:31:37.801804 4875 reflector.go:368] Caches populated for 
*v1.Secret from object-"openshift-console"/"console-oauth-config" Nov 26 00:31:37 crc kubenswrapper[4875]: I1126 00:31:37.818127 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"openshift-service-ca.crt" Nov 26 00:31:37 crc kubenswrapper[4875]: I1126 00:31:37.865939 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-dockercfg-zdk86" Nov 26 00:31:37 crc kubenswrapper[4875]: I1126 00:31:37.890540 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"audit-1" Nov 26 00:31:37 crc kubenswrapper[4875]: I1126 00:31:37.900943 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"kube-root-ca.crt" Nov 26 00:31:38 crc kubenswrapper[4875]: I1126 00:31:38.115798 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-tls" Nov 26 00:31:38 crc kubenswrapper[4875]: I1126 00:31:38.271164 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-config-operator"/"config-operator-serving-cert" Nov 26 00:31:38 crc kubenswrapper[4875]: I1126 00:31:38.281502 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"proxy-tls" Nov 26 00:31:38 crc kubenswrapper[4875]: I1126 00:31:38.307885 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"config" Nov 26 00:31:38 crc kubenswrapper[4875]: I1126 00:31:38.357296 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-operator-dockercfg-r9srn" Nov 26 00:31:38 crc kubenswrapper[4875]: I1126 00:31:38.442862 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"package-server-manager-serving-cert" Nov 26 00:31:38 crc kubenswrapper[4875]: I1126 00:31:38.490131 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"service-telemetry"/"openshift-service-ca.crt" Nov 26 00:31:38 crc kubenswrapper[4875]: I1126 00:31:38.498959 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication-operator"/"authentication-operator-dockercfg-mz9bj" Nov 26 00:31:38 crc kubenswrapper[4875]: I1126 00:31:38.507864 4875 reflector.go:368] Caches populated for *v1.Pod from pkg/kubelet/config/apiserver.go:66 Nov 26 00:31:38 crc kubenswrapper[4875]: I1126 00:31:38.510894 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podStartSLOduration=40.510875359 podStartE2EDuration="40.510875359s" podCreationTimestamp="2025-11-26 00:30:58 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:31:19.420662988 +0000 UTC m=+1452.610638683" watchObservedRunningTime="2025-11-26 00:31:38.510875359 +0000 UTC m=+1471.700851054" Nov 26 00:31:38 crc kubenswrapper[4875]: I1126 00:31:38.512783 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/certified-operators-546g6" podStartSLOduration=41.099688647 podStartE2EDuration="42.512775901s" podCreationTimestamp="2025-11-26 00:30:56 +0000 UTC" firstStartedPulling="2025-11-26 00:30:58.576293022 +0000 UTC m=+1431.766268717" lastFinishedPulling="2025-11-26 00:30:59.989380286 +0000 UTC m=+1433.179355971" observedRunningTime="2025-11-26 
00:31:19.365193112 +0000 UTC m=+1452.555168807" watchObservedRunningTime="2025-11-26 00:31:38.512775901 +0000 UTC m=+1471.702751596" Nov 26 00:31:38 crc kubenswrapper[4875]: I1126 00:31:38.513963 4875 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 26 00:31:38 crc kubenswrapper[4875]: I1126 00:31:38.514007 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-kube-apiserver/kube-apiserver-crc"] Nov 26 00:31:38 crc kubenswrapper[4875]: I1126 00:31:38.519193 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-kube-apiserver/kube-apiserver-crc" Nov 26 00:31:38 crc kubenswrapper[4875]: I1126 00:31:38.528913 4875 scope.go:117] "RemoveContainer" containerID="37ee2fb3c4f9587c099434441f8a051c73d91c5cbcfda08bce7fe55cba41b95d" Nov 26 00:31:38 crc kubenswrapper[4875]: E1126 00:31:38.529095 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"manager\" with CrashLoopBackOff: \"back-off 20s restarting failed container=manager pod=elastic-operator-bf7b77755-xjqrk_service-telemetry(5fdc5235-fab2-4d96-8526-1ca2270a92ac)\"" pod="service-telemetry/elastic-operator-bf7b77755-xjqrk" podUID="5fdc5235-fab2-4d96-8526-1ca2270a92ac" Nov 26 00:31:38 crc kubenswrapper[4875]: I1126 00:31:38.536580 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-kube-apiserver/kube-apiserver-crc" podStartSLOduration=19.536565357 podStartE2EDuration="19.536565357s" podCreationTimestamp="2025-11-26 00:31:19 +0000 UTC" firstStartedPulling="0001-01-01 00:00:00 +0000 UTC" lastFinishedPulling="0001-01-01 00:00:00 +0000 UTC" observedRunningTime="2025-11-26 00:31:38.533824322 +0000 UTC m=+1471.723800027" watchObservedRunningTime="2025-11-26 00:31:38.536565357 +0000 UTC m=+1471.726541052" Nov 26 00:31:38 crc kubenswrapper[4875]: I1126 00:31:38.541915 4875 reflector.go:368] Caches populated for *v1.Secret from object-"service-telemetry"/"elastic-operator-dockercfg-48pn2" Nov 26 00:31:38 crc kubenswrapper[4875]: I1126 00:31:38.570652 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"kube-root-ca.crt" Nov 26 00:31:38 crc kubenswrapper[4875]: I1126 00:31:38.647238 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"service-ca-operator-dockercfg-rg9jl" Nov 26 00:31:38 crc kubenswrapper[4875]: I1126 00:31:38.707326 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"node-bootstrapper-token" Nov 26 00:31:38 crc kubenswrapper[4875]: I1126 00:31:38.728888 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-operator"/"trusted-ca" Nov 26 00:31:38 crc kubenswrapper[4875]: I1126 00:31:38.790883 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca-operator"/"serving-cert" Nov 26 00:31:38 crc kubenswrapper[4875]: I1126 00:31:38.844644 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-operator-lifecycle-manager"/"openshift-service-ca.crt" Nov 26 00:31:38 crc kubenswrapper[4875]: I1126 00:31:38.874233 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"installation-pull-secrets" Nov 26 00:31:38 crc kubenswrapper[4875]: I1126 00:31:38.889385 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns"/"dns-default" Nov 26 00:31:38 crc 
kubenswrapper[4875]: I1126 00:31:38.909795 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-machine-approver"/"machine-approver-sa-dockercfg-nl2j4" Nov 26 00:31:38 crc kubenswrapper[4875]: I1126 00:31:38.913833 4875 generic.go:334] "Generic (PLEG): container finished" podID="2bf438ed-b485-429e-ab20-c5f354a3ab25" containerID="69ad54490d14386699363e8f1df531287523df983db3348f579efef865b0d906" exitCode=1 Nov 26 00:31:38 crc kubenswrapper[4875]: I1126 00:31:38.913958 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-855d9ccff4-tcxqx" event={"ID":"2bf438ed-b485-429e-ab20-c5f354a3ab25","Type":"ContainerDied","Data":"69ad54490d14386699363e8f1df531287523df983db3348f579efef865b0d906"} Nov 26 00:31:38 crc kubenswrapper[4875]: I1126 00:31:38.914986 4875 scope.go:117] "RemoveContainer" containerID="69ad54490d14386699363e8f1df531287523df983db3348f579efef865b0d906" Nov 26 00:31:38 crc kubenswrapper[4875]: I1126 00:31:38.930423 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"openshift-service-ca.crt" Nov 26 00:31:38 crc kubenswrapper[4875]: I1126 00:31:38.937738 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-samples-operator"/"kube-root-ca.crt" Nov 26 00:31:39 crc kubenswrapper[4875]: I1126 00:31:39.040580 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns"/"dns-default-metrics-tls" Nov 26 00:31:39 crc kubenswrapper[4875]: I1126 00:31:39.043701 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"audit-1" Nov 26 00:31:39 crc kubenswrapper[4875]: I1126 00:31:39.051839 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"kube-storage-version-migrator-operator-dockercfg-2bh8d" Nov 26 00:31:39 crc kubenswrapper[4875]: I1126 00:31:39.106962 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"kube-root-ca.crt" Nov 26 00:31:39 crc kubenswrapper[4875]: I1126 00:31:39.162123 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress"/"openshift-service-ca.crt" Nov 26 00:31:39 crc kubenswrapper[4875]: I1126 00:31:39.173234 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"client-ca" Nov 26 00:31:39 crc kubenswrapper[4875]: I1126 00:31:39.191403 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-dockercfg-qt55r" Nov 26 00:31:39 crc kubenswrapper[4875]: I1126 00:31:39.200020 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-canary"/"canary-serving-cert" Nov 26 00:31:39 crc kubenswrapper[4875]: I1126 00:31:39.210161 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"console-config" Nov 26 00:31:39 crc kubenswrapper[4875]: I1126 00:31:39.233280 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"trusted-ca-bundle" Nov 26 00:31:39 crc kubenswrapper[4875]: I1126 00:31:39.248158 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"openshift-service-ca.crt" Nov 26 00:31:39 crc kubenswrapper[4875]: I1126 00:31:39.254585 4875 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-config-operator"/"openshift-config-operator-dockercfg-7pc5z" Nov 26 00:31:39 crc kubenswrapper[4875]: I1126 00:31:39.297836 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"packageserver-service-cert" Nov 26 00:31:39 crc kubenswrapper[4875]: I1126 00:31:39.465007 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"metrics-tls" Nov 26 00:31:39 crc kubenswrapper[4875]: I1126 00:31:39.518978 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-dockercfg-vw8fw" Nov 26 00:31:39 crc kubenswrapper[4875]: I1126 00:31:39.551562 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"kube-root-ca.crt" Nov 26 00:31:39 crc kubenswrapper[4875]: I1126 00:31:39.599128 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"openshift-global-ca" Nov 26 00:31:39 crc kubenswrapper[4875]: I1126 00:31:39.655506 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"kube-root-ca.crt" Nov 26 00:31:39 crc kubenswrapper[4875]: I1126 00:31:39.657032 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-scheduler-operator"/"kube-scheduler-operator-serving-cert" Nov 26 00:31:39 crc kubenswrapper[4875]: I1126 00:31:39.685797 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"openshift-service-ca.crt" Nov 26 00:31:39 crc kubenswrapper[4875]: I1126 00:31:39.747048 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"registry-dockercfg-kzzsd" Nov 26 00:31:39 crc kubenswrapper[4875]: I1126 00:31:39.784756 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"openshift-service-ca.crt" Nov 26 00:31:39 crc kubenswrapper[4875]: I1126 00:31:39.857460 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"signing-key" Nov 26 00:31:39 crc kubenswrapper[4875]: I1126 00:31:39.924318 4875 generic.go:334] "Generic (PLEG): container finished" podID="2bf438ed-b485-429e-ab20-c5f354a3ab25" containerID="cbf747232edb00a8c6aa9f672d16df0627c45059328c570fc29a9cdb3453aac5" exitCode=1 Nov 26 00:31:39 crc kubenswrapper[4875]: I1126 00:31:39.924377 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-855d9ccff4-tcxqx" event={"ID":"2bf438ed-b485-429e-ab20-c5f354a3ab25","Type":"ContainerDied","Data":"cbf747232edb00a8c6aa9f672d16df0627c45059328c570fc29a9cdb3453aac5"} Nov 26 00:31:39 crc kubenswrapper[4875]: I1126 00:31:39.924440 4875 scope.go:117] "RemoveContainer" containerID="69ad54490d14386699363e8f1df531287523df983db3348f579efef865b0d906" Nov 26 00:31:39 crc kubenswrapper[4875]: I1126 00:31:39.925098 4875 scope.go:117] "RemoveContainer" containerID="cbf747232edb00a8c6aa9f672d16df0627c45059328c570fc29a9cdb3453aac5" Nov 26 00:31:39 crc kubenswrapper[4875]: E1126 00:31:39.925593 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"cert-manager-cainjector\" with CrashLoopBackOff: \"back-off 10s restarting failed container=cert-manager-cainjector pod=cert-manager-cainjector-855d9ccff4-tcxqx_cert-manager(2bf438ed-b485-429e-ab20-c5f354a3ab25)\"" pod="cert-manager/cert-manager-cainjector-855d9ccff4-tcxqx" 
podUID="2bf438ed-b485-429e-ab20-c5f354a3ab25" Nov 26 00:31:39 crc kubenswrapper[4875]: I1126 00:31:39.964869 4875 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-dockercfg-x9c9w" Nov 26 00:31:39 crc kubenswrapper[4875]: I1126 00:31:39.981662 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"observability-operator-sa-dockercfg-ghm7k" Nov 26 00:31:39 crc kubenswrapper[4875]: I1126 00:31:39.992681 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-operator"/"metrics-tls" Nov 26 00:31:40 crc kubenswrapper[4875]: I1126 00:31:40.144717 4875 reflector.go:368] Caches populated for *v1.Secret from object-"service-telemetry"/"interconnect-operator-dockercfg-4b8fv" Nov 26 00:31:40 crc kubenswrapper[4875]: I1126 00:31:40.175489 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"metrics-daemon-sa-dockercfg-d427c" Nov 26 00:31:40 crc kubenswrapper[4875]: I1126 00:31:40.192245 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-node-identity"/"kube-root-ca.crt" Nov 26 00:31:40 crc kubenswrapper[4875]: I1126 00:31:40.210126 4875 reflector.go:368] Caches populated for *v1.RuntimeClass from k8s.io/client-go/informers/factory.go:160 Nov 26 00:31:40 crc kubenswrapper[4875]: I1126 00:31:40.258024 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"kube-root-ca.crt" Nov 26 00:31:40 crc kubenswrapper[4875]: I1126 00:31:40.269317 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-multus"/"multus-admission-controller-secret" Nov 26 00:31:40 crc kubenswrapper[4875]: I1126 00:31:40.303627 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"oauth-apiserver-sa-dockercfg-6r2bq" Nov 26 00:31:40 crc kubenswrapper[4875]: I1126 00:31:40.322698 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"trusted-ca-bundle" Nov 26 00:31:40 crc kubenswrapper[4875]: I1126 00:31:40.366897 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-dockercfg-x57mr" Nov 26 00:31:40 crc kubenswrapper[4875]: I1126 00:31:40.410046 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-diagnostics"/"kube-root-ca.crt" Nov 26 00:31:40 crc kubenswrapper[4875]: I1126 00:31:40.447358 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"etcd-client" Nov 26 00:31:40 crc kubenswrapper[4875]: I1126 00:31:40.489982 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"mco-proxy-tls" Nov 26 00:31:40 crc kubenswrapper[4875]: I1126 00:31:40.501059 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca"/"signing-cabundle" Nov 26 00:31:40 crc kubenswrapper[4875]: I1126 00:31:40.534759 4875 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-cainjector-dockercfg-sk7kb" Nov 26 00:31:40 crc kubenswrapper[4875]: I1126 00:31:40.625999 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-version"/"kube-root-ca.crt" Nov 26 00:31:40 crc kubenswrapper[4875]: I1126 00:31:40.652402 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"encryption-config-1" Nov 26 
00:31:40 crc kubenswrapper[4875]: I1126 00:31:40.665313 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-node-identity"/"network-node-identity-cert" Nov 26 00:31:40 crc kubenswrapper[4875]: I1126 00:31:40.678540 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-network-operator"/"iptables-alerter-script" Nov 26 00:31:40 crc kubenswrapper[4875]: I1126 00:31:40.708589 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-dockercfg-p4v7d" Nov 26 00:31:40 crc kubenswrapper[4875]: I1126 00:31:40.738158 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-kubernetes-node-dockercfg-pwtwl" Nov 26 00:31:40 crc kubenswrapper[4875]: I1126 00:31:40.739698 4875 reflector.go:368] Caches populated for *v1.Secret from object-"service-telemetry"/"elasticsearch-es-remote-ca" Nov 26 00:31:40 crc kubenswrapper[4875]: I1126 00:31:40.788872 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-apiserver-operator-config" Nov 26 00:31:40 crc kubenswrapper[4875]: I1126 00:31:40.818837 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager"/"openshift-service-ca.crt" Nov 26 00:31:40 crc kubenswrapper[4875]: I1126 00:31:40.857839 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-metrics-certs-default" Nov 26 00:31:40 crc kubenswrapper[4875]: I1126 00:31:40.870557 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"service-telemetry"/"prometheus-webhook-snmp-2-sys-config" Nov 26 00:31:40 crc kubenswrapper[4875]: I1126 00:31:40.934026 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/service-telemetry_prometheus-webhook-snmp-2-build_650295c4-b8e4-4036-8245-441b299b5aac/docker-build/0.log" Nov 26 00:31:40 crc kubenswrapper[4875]: I1126 00:31:40.934667 4875 generic.go:334] "Generic (PLEG): container finished" podID="650295c4-b8e4-4036-8245-441b299b5aac" containerID="c0eef9a70b2d5a1642372aabf0acebb86c0a1c2b67c2a93f2305b5695ce948a3" exitCode=1 Nov 26 00:31:40 crc kubenswrapper[4875]: I1126 00:31:40.934699 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/prometheus-webhook-snmp-2-build" event={"ID":"650295c4-b8e4-4036-8245-441b299b5aac","Type":"ContainerDied","Data":"c0eef9a70b2d5a1642372aabf0acebb86c0a1c2b67c2a93f2305b5695ce948a3"} Nov 26 00:31:40 crc kubenswrapper[4875]: I1126 00:31:40.999462 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-service-ca-operator"/"kube-root-ca.crt" Nov 26 00:31:41 crc kubenswrapper[4875]: I1126 00:31:41.003311 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-dns-operator"/"kube-root-ca.crt" Nov 26 00:31:41 crc kubenswrapper[4875]: I1126 00:31:41.029113 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator"/"kube-storage-version-migrator-sa-dockercfg-5xfcg" Nov 26 00:31:41 crc kubenswrapper[4875]: I1126 00:31:41.073767 4875 reflector.go:368] Caches populated for *v1.Secret from object-"cert-manager"/"cert-manager-webhook-dockercfg-knx9h" Nov 26 00:31:41 crc kubenswrapper[4875]: I1126 00:31:41.130785 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"kube-root-ca.crt" Nov 26 00:31:41 crc kubenswrapper[4875]: I1126 00:31:41.177226 4875 reflector.go:368] 
Caches populated for *v1.Secret from object-"openshift-apiserver"/"etcd-client" Nov 26 00:31:41 crc kubenswrapper[4875]: I1126 00:31:41.258444 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"hostpath-provisioner"/"openshift-service-ca.crt" Nov 26 00:31:41 crc kubenswrapper[4875]: I1126 00:31:41.272545 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"service-telemetry"/"elasticsearch-es-scripts" Nov 26 00:31:41 crc kubenswrapper[4875]: I1126 00:31:41.276041 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-storage-version-migrator-operator"/"serving-cert" Nov 26 00:31:41 crc kubenswrapper[4875]: I1126 00:31:41.317586 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"openshift-service-ca.crt" Nov 26 00:31:41 crc kubenswrapper[4875]: I1126 00:31:41.332590 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager-operator"/"openshift-controller-manager-operator-serving-cert" Nov 26 00:31:41 crc kubenswrapper[4875]: I1126 00:31:41.384861 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console-operator"/"console-operator-dockercfg-4xjcr" Nov 26 00:31:41 crc kubenswrapper[4875]: I1126 00:31:41.460074 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"service-telemetry"/"kube-root-ca.crt" Nov 26 00:31:41 crc kubenswrapper[4875]: I1126 00:31:41.667845 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"v4-0-config-system-cliconfig" Nov 26 00:31:41 crc kubenswrapper[4875]: I1126 00:31:41.726697 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"image-registry-tls" Nov 26 00:31:41 crc kubenswrapper[4875]: I1126 00:31:41.727974 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-route-controller-manager"/"route-controller-manager-sa-dockercfg-h2zr2" Nov 26 00:31:41 crc kubenswrapper[4875]: I1126 00:31:41.755434 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"samples-operator-tls" Nov 26 00:31:41 crc kubenswrapper[4875]: I1126 00:31:41.786067 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-etcd-operator"/"etcd-operator-config" Nov 26 00:31:41 crc kubenswrapper[4875]: I1126 00:31:41.838877 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-service-ca"/"service-ca-dockercfg-pn86c" Nov 26 00:31:41 crc kubenswrapper[4875]: I1126 00:31:41.930648 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"marketplace-trusted-ca" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.041374 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"service-telemetry"/"elasticsearch-es-unicast-hosts" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.056979 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-dockercfg-t7n6l" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.076191 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console"/"oauth-serving-cert" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.108219 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"machine-config-operator-images" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 
00:31:42.123522 4875 kubelet.go:2431] "SyncLoop REMOVE" source="file" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.123723 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" containerID="cri-o://6579f652fc1a0ddc9835178f0d25afd6560cbfa0f63f67143a24f7628c16ee10" gracePeriod=5 Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.130840 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-api"/"machine-api-operator-images" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.190005 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-cluster-machine-approver"/"openshift-service-ca.crt" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.198895 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-controller-dockercfg-c2lfx" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.200689 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/service-telemetry_prometheus-webhook-snmp-2-build_650295c4-b8e4-4036-8245-441b299b5aac/docker-build/0.log" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.201606 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="service-telemetry/prometheus-webhook-snmp-2-build" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.344071 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/650295c4-b8e4-4036-8245-441b299b5aac-builder-dockercfg-5d5px-push\") pod \"650295c4-b8e4-4036-8245-441b299b5aac\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.344130 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/650295c4-b8e4-4036-8245-441b299b5aac-container-storage-root\") pod \"650295c4-b8e4-4036-8245-441b299b5aac\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.344188 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/650295c4-b8e4-4036-8245-441b299b5aac-node-pullsecrets\") pod \"650295c4-b8e4-4036-8245-441b299b5aac\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.344217 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/650295c4-b8e4-4036-8245-441b299b5aac-build-proxy-ca-bundles\") pod \"650295c4-b8e4-4036-8245-441b299b5aac\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.344254 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/650295c4-b8e4-4036-8245-441b299b5aac-build-system-configs\") pod \"650295c4-b8e4-4036-8245-441b299b5aac\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.344277 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume 
\"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/650295c4-b8e4-4036-8245-441b299b5aac-builder-dockercfg-5d5px-pull\") pod \"650295c4-b8e4-4036-8245-441b299b5aac\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.344310 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/650295c4-b8e4-4036-8245-441b299b5aac-container-storage-run\") pod \"650295c4-b8e4-4036-8245-441b299b5aac\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.344337 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/650295c4-b8e4-4036-8245-441b299b5aac-build-ca-bundles\") pod \"650295c4-b8e4-4036-8245-441b299b5aac\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.344364 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/650295c4-b8e4-4036-8245-441b299b5aac-buildworkdir\") pod \"650295c4-b8e4-4036-8245-441b299b5aac\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.344506 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/650295c4-b8e4-4036-8245-441b299b5aac-build-blob-cache\") pod \"650295c4-b8e4-4036-8245-441b299b5aac\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.344547 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/650295c4-b8e4-4036-8245-441b299b5aac-buildcachedir\") pod \"650295c4-b8e4-4036-8245-441b299b5aac\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.344600 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-jj87p\" (UniqueName: \"kubernetes.io/projected/650295c4-b8e4-4036-8245-441b299b5aac-kube-api-access-jj87p\") pod \"650295c4-b8e4-4036-8245-441b299b5aac\" (UID: \"650295c4-b8e4-4036-8245-441b299b5aac\") " Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.346006 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/650295c4-b8e4-4036-8245-441b299b5aac-node-pullsecrets" (OuterVolumeSpecName: "node-pullsecrets") pod "650295c4-b8e4-4036-8245-441b299b5aac" (UID: "650295c4-b8e4-4036-8245-441b299b5aac"). InnerVolumeSpecName "node-pullsecrets". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.347288 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/650295c4-b8e4-4036-8245-441b299b5aac-container-storage-run" (OuterVolumeSpecName: "container-storage-run") pod "650295c4-b8e4-4036-8245-441b299b5aac" (UID: "650295c4-b8e4-4036-8245-441b299b5aac"). InnerVolumeSpecName "container-storage-run". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.347685 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/650295c4-b8e4-4036-8245-441b299b5aac-buildcachedir" (OuterVolumeSpecName: "buildcachedir") pod "650295c4-b8e4-4036-8245-441b299b5aac" (UID: "650295c4-b8e4-4036-8245-441b299b5aac"). InnerVolumeSpecName "buildcachedir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.347992 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/650295c4-b8e4-4036-8245-441b299b5aac-build-ca-bundles" (OuterVolumeSpecName: "build-ca-bundles") pod "650295c4-b8e4-4036-8245-441b299b5aac" (UID: "650295c4-b8e4-4036-8245-441b299b5aac"). InnerVolumeSpecName "build-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.348007 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/650295c4-b8e4-4036-8245-441b299b5aac-build-system-configs" (OuterVolumeSpecName: "build-system-configs") pod "650295c4-b8e4-4036-8245-441b299b5aac" (UID: "650295c4-b8e4-4036-8245-441b299b5aac"). InnerVolumeSpecName "build-system-configs". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.348029 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/configmap/650295c4-b8e4-4036-8245-441b299b5aac-build-proxy-ca-bundles" (OuterVolumeSpecName: "build-proxy-ca-bundles") pod "650295c4-b8e4-4036-8245-441b299b5aac" (UID: "650295c4-b8e4-4036-8245-441b299b5aac"). InnerVolumeSpecName "build-proxy-ca-bundles". PluginName "kubernetes.io/configmap", VolumeGidValue "" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.348517 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/650295c4-b8e4-4036-8245-441b299b5aac-buildworkdir" (OuterVolumeSpecName: "buildworkdir") pod "650295c4-b8e4-4036-8245-441b299b5aac" (UID: "650295c4-b8e4-4036-8245-441b299b5aac"). InnerVolumeSpecName "buildworkdir". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.350771 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/650295c4-b8e4-4036-8245-441b299b5aac-builder-dockercfg-5d5px-push" (OuterVolumeSpecName: "builder-dockercfg-5d5px-push") pod "650295c4-b8e4-4036-8245-441b299b5aac" (UID: "650295c4-b8e4-4036-8245-441b299b5aac"). InnerVolumeSpecName "builder-dockercfg-5d5px-push". PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.350837 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/secret/650295c4-b8e4-4036-8245-441b299b5aac-builder-dockercfg-5d5px-pull" (OuterVolumeSpecName: "builder-dockercfg-5d5px-pull") pod "650295c4-b8e4-4036-8245-441b299b5aac" (UID: "650295c4-b8e4-4036-8245-441b299b5aac"). InnerVolumeSpecName "builder-dockercfg-5d5px-pull". 
PluginName "kubernetes.io/secret", VolumeGidValue "" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.360313 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/650295c4-b8e4-4036-8245-441b299b5aac-kube-api-access-jj87p" (OuterVolumeSpecName: "kube-api-access-jj87p") pod "650295c4-b8e4-4036-8245-441b299b5aac" (UID: "650295c4-b8e4-4036-8245-441b299b5aac"). InnerVolumeSpecName "kube-api-access-jj87p". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.404864 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"cert-manager-operator"/"openshift-service-ca.crt" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.428287 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"multus-daemon-config" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.444017 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/650295c4-b8e4-4036-8245-441b299b5aac-build-blob-cache" (OuterVolumeSpecName: "build-blob-cache") pod "650295c4-b8e4-4036-8245-441b299b5aac" (UID: "650295c4-b8e4-4036-8245-441b299b5aac"). InnerVolumeSpecName "build-blob-cache". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.446047 4875 reconciler_common.go:293] "Volume detached for volume \"build-blob-cache\" (UniqueName: \"kubernetes.io/empty-dir/650295c4-b8e4-4036-8245-441b299b5aac-build-blob-cache\") on node \"crc\" DevicePath \"\"" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.446080 4875 reconciler_common.go:293] "Volume detached for volume \"buildcachedir\" (UniqueName: \"kubernetes.io/host-path/650295c4-b8e4-4036-8245-441b299b5aac-buildcachedir\") on node \"crc\" DevicePath \"\"" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.446089 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-jj87p\" (UniqueName: \"kubernetes.io/projected/650295c4-b8e4-4036-8245-441b299b5aac-kube-api-access-jj87p\") on node \"crc\" DevicePath \"\"" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.446100 4875 reconciler_common.go:293] "Volume detached for volume \"builder-dockercfg-5d5px-push\" (UniqueName: \"kubernetes.io/secret/650295c4-b8e4-4036-8245-441b299b5aac-builder-dockercfg-5d5px-push\") on node \"crc\" DevicePath \"\"" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.446108 4875 reconciler_common.go:293] "Volume detached for volume \"node-pullsecrets\" (UniqueName: \"kubernetes.io/host-path/650295c4-b8e4-4036-8245-441b299b5aac-node-pullsecrets\") on node \"crc\" DevicePath \"\"" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.446129 4875 reconciler_common.go:293] "Volume detached for volume \"build-proxy-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/650295c4-b8e4-4036-8245-441b299b5aac-build-proxy-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.446139 4875 reconciler_common.go:293] "Volume detached for volume \"build-system-configs\" (UniqueName: \"kubernetes.io/configmap/650295c4-b8e4-4036-8245-441b299b5aac-build-system-configs\") on node \"crc\" DevicePath \"\"" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.446148 4875 reconciler_common.go:293] "Volume detached for volume \"builder-dockercfg-5d5px-pull\" (UniqueName: \"kubernetes.io/secret/650295c4-b8e4-4036-8245-441b299b5aac-builder-dockercfg-5d5px-pull\") on node \"crc\" 
DevicePath \"\"" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.446157 4875 reconciler_common.go:293] "Volume detached for volume \"container-storage-run\" (UniqueName: \"kubernetes.io/empty-dir/650295c4-b8e4-4036-8245-441b299b5aac-container-storage-run\") on node \"crc\" DevicePath \"\"" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.446168 4875 reconciler_common.go:293] "Volume detached for volume \"build-ca-bundles\" (UniqueName: \"kubernetes.io/configmap/650295c4-b8e4-4036-8245-441b299b5aac-build-ca-bundles\") on node \"crc\" DevicePath \"\"" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.446176 4875 reconciler_common.go:293] "Volume detached for volume \"buildworkdir\" (UniqueName: \"kubernetes.io/empty-dir/650295c4-b8e4-4036-8245-441b299b5aac-buildworkdir\") on node \"crc\" DevicePath \"\"" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.471533 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-system-router-certs" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.487127 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"default-cni-sysctl-allowlist" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.522649 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-apiserver-operator"/"kube-root-ca.crt" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.531883 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-scheduler-operator"/"openshift-kube-scheduler-operator-config" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.536491 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-image-registry"/"cluster-image-registry-operator-dockercfg-m4qtx" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.601447 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ovn-kubernetes"/"ovnkube-script-lib" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.625687 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-multus"/"kube-root-ca.crt" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.743357 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"community-operators-dockercfg-dmngl" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.794028 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"service-ca-bundle" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.849451 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ovn-kubernetes"/"ovn-control-plane-metrics-cert" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.849989 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-marketplace"/"kube-root-ca.crt" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.864682 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator-operator"/"openshift-service-ca.crt" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.917944 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-machine-config-operator"/"kube-rbac-proxy" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.948810 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/service-telemetry_prometheus-webhook-snmp-2-build_650295c4-b8e4-4036-8245-441b299b5aac/docker-build/0.log" Nov 26 
00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.949524 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/prometheus-webhook-snmp-2-build" event={"ID":"650295c4-b8e4-4036-8245-441b299b5aac","Type":"ContainerDied","Data":"4a3c12665004c51d77b78385c7d531378331fc0d546d104801e9e57c4d6ede29"} Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.949587 4875 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4a3c12665004c51d77b78385c7d531378331fc0d546d104801e9e57c4d6ede29" Nov 26 00:31:42 crc kubenswrapper[4875]: I1126 00:31:42.950356 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="service-telemetry/prometheus-webhook-snmp-2-build" Nov 26 00:31:43 crc kubenswrapper[4875]: I1126 00:31:43.061803 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"marketplace-operator-dockercfg-5nsgg" Nov 26 00:31:43 crc kubenswrapper[4875]: I1126 00:31:43.091999 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"trusted-ca-bundle" Nov 26 00:31:43 crc kubenswrapper[4875]: I1126 00:31:43.097021 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"openshift-service-ca.crt" Nov 26 00:31:43 crc kubenswrapper[4875]: I1126 00:31:43.142379 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/650295c4-b8e4-4036-8245-441b299b5aac-container-storage-root" (OuterVolumeSpecName: "container-storage-root") pod "650295c4-b8e4-4036-8245-441b299b5aac" (UID: "650295c4-b8e4-4036-8245-441b299b5aac"). InnerVolumeSpecName "container-storage-root". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:31:43 crc kubenswrapper[4875]: I1126 00:31:43.156493 4875 reconciler_common.go:293] "Volume detached for volume \"container-storage-root\" (UniqueName: \"kubernetes.io/empty-dir/650295c4-b8e4-4036-8245-441b299b5aac-container-storage-root\") on node \"crc\" DevicePath \"\"" Nov 26 00:31:43 crc kubenswrapper[4875]: I1126 00:31:43.271352 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication"/"kube-root-ca.crt" Nov 26 00:31:43 crc kubenswrapper[4875]: I1126 00:31:43.353282 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-config-operator"/"machine-config-server-dockercfg-qx5rd" Nov 26 00:31:43 crc kubenswrapper[4875]: I1126 00:31:43.619011 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-controller-manager"/"kube-root-ca.crt" Nov 26 00:31:43 crc kubenswrapper[4875]: I1126 00:31:43.674842 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-dockercfg-xtcjv" Nov 26 00:31:43 crc kubenswrapper[4875]: I1126 00:31:43.686549 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"machine-api-operator-dockercfg-mfbb7" Nov 26 00:31:43 crc kubenswrapper[4875]: I1126 00:31:43.743588 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"trusted-ca-bundle" Nov 26 00:31:43 crc kubenswrapper[4875]: I1126 00:31:43.745543 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-console-operator"/"kube-root-ca.crt" Nov 26 00:31:43 crc kubenswrapper[4875]: I1126 00:31:43.792519 4875 reflector.go:368] Caches populated for *v1.Secret from 
object-"openshift-machine-config-operator"/"machine-config-server-tls" Nov 26 00:31:43 crc kubenswrapper[4875]: I1126 00:31:43.880033 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-machine-api"/"control-plane-machine-set-operator-dockercfg-k9rxt" Nov 26 00:31:44 crc kubenswrapper[4875]: I1126 00:31:44.006806 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-controller-manager"/"openshift-controller-manager-sa-dockercfg-msq4c" Nov 26 00:31:44 crc kubenswrapper[4875]: I1126 00:31:44.044177 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operator-lifecycle-manager"/"pprof-cert" Nov 26 00:31:44 crc kubenswrapper[4875]: I1126 00:31:44.275331 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-oauth-apiserver"/"serving-cert" Nov 26 00:31:44 crc kubenswrapper[4875]: I1126 00:31:44.312361 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-etcd-operator"/"etcd-client" Nov 26 00:31:44 crc kubenswrapper[4875]: I1126 00:31:44.337818 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver"/"etcd-serving-ca" Nov 26 00:31:44 crc kubenswrapper[4875]: I1126 00:31:44.349695 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-serving-cert" Nov 26 00:31:44 crc kubenswrapper[4875]: I1126 00:31:44.488096 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"openshift-service-ca.crt" Nov 26 00:31:44 crc kubenswrapper[4875]: I1126 00:31:44.511566 4875 reflector.go:368] Caches populated for *v1.Secret from object-"service-telemetry"/"default-dockercfg-j98p8" Nov 26 00:31:44 crc kubenswrapper[4875]: I1126 00:31:44.532195 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress-operator"/"ingress-operator-dockercfg-7lnqk" Nov 26 00:31:44 crc kubenswrapper[4875]: I1126 00:31:44.603398 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-cluster-samples-operator"/"cluster-samples-operator-dockercfg-xpp9w" Nov 26 00:31:44 crc kubenswrapper[4875]: I1126 00:31:44.608533 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-authentication"/"v4-0-config-user-template-login" Nov 26 00:31:44 crc kubenswrapper[4875]: I1126 00:31:44.643350 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"openshift-service-ca.crt" Nov 26 00:31:44 crc kubenswrapper[4875]: I1126 00:31:44.651583 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-marketplace"/"certified-operators-dockercfg-4rs5g" Nov 26 00:31:44 crc kubenswrapper[4875]: I1126 00:31:44.730958 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-ingress"/"router-stats-default" Nov 26 00:31:44 crc kubenswrapper[4875]: I1126 00:31:44.944593 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-serving-cert" Nov 26 00:31:44 crc kubenswrapper[4875]: I1126 00:31:44.990461 4875 reflector.go:368] Caches populated for *v1.Service from k8s.io/client-go/informers/factory.go:160 Nov 26 00:31:45 crc kubenswrapper[4875]: I1126 00:31:45.016031 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-apiserver"/"serving-cert" Nov 26 00:31:45 crc kubenswrapper[4875]: I1126 00:31:45.237495 4875 reflector.go:368] 
Caches populated for *v1.Secret from object-"openshift-cluster-version"/"default-dockercfg-gxtc4" Nov 26 00:31:45 crc kubenswrapper[4875]: I1126 00:31:45.243434 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-storage-version-migrator"/"openshift-service-ca.crt" Nov 26 00:31:45 crc kubenswrapper[4875]: I1126 00:31:45.253593 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-network-console"/"networking-console-plugin-cert" Nov 26 00:31:45 crc kubenswrapper[4875]: I1126 00:31:45.499310 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-console"/"console-serving-cert" Nov 26 00:31:46 crc kubenswrapper[4875]: I1126 00:31:46.163864 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-dns-operator"/"metrics-tls" Nov 26 00:31:46 crc kubenswrapper[4875]: I1126 00:31:46.423290 4875 reflector.go:368] Caches populated for *v1.Secret from object-"openshift-operators"/"obo-prometheus-operator-admission-webhook-service-cert" Nov 26 00:31:46 crc kubenswrapper[4875]: I1126 00:31:46.453381 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-kube-controller-manager-operator"/"kube-controller-manager-operator-config" Nov 26 00:31:46 crc kubenswrapper[4875]: I1126 00:31:46.554718 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-route-controller-manager"/"config" Nov 26 00:31:46 crc kubenswrapper[4875]: I1126 00:31:46.637671 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-authentication-operator"/"authentication-operator-config" Nov 26 00:31:46 crc kubenswrapper[4875]: I1126 00:31:46.800786 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-apiserver-operator"/"openshift-apiserver-operator-config" Nov 26 00:31:46 crc kubenswrapper[4875]: I1126 00:31:46.823251 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-ingress-canary"/"openshift-service-ca.crt" Nov 26 00:31:46 crc kubenswrapper[4875]: I1126 00:31:46.897621 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-oauth-apiserver"/"etcd-serving-ca" Nov 26 00:31:47 crc kubenswrapper[4875]: I1126 00:31:47.700580 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 26 00:31:47 crc kubenswrapper[4875]: I1126 00:31:47.700866 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 00:31:47 crc kubenswrapper[4875]: I1126 00:31:47.827046 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 26 00:31:47 crc kubenswrapper[4875]: I1126 00:31:47.827092 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 26 00:31:47 crc kubenswrapper[4875]: I1126 00:31:47.827125 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 26 00:31:47 crc kubenswrapper[4875]: I1126 00:31:47.827185 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 26 00:31:47 crc kubenswrapper[4875]: I1126 00:31:47.827186 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests" (OuterVolumeSpecName: "manifests") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "manifests". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:31:47 crc kubenswrapper[4875]: I1126 00:31:47.827208 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") pod \"f85e55b1a89d02b0cb034b1ea31ed45a\" (UID: \"f85e55b1a89d02b0cb034b1ea31ed45a\") " Nov 26 00:31:47 crc kubenswrapper[4875]: I1126 00:31:47.827263 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir" (OuterVolumeSpecName: "resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:31:47 crc kubenswrapper[4875]: I1126 00:31:47.827267 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log" (OuterVolumeSpecName: "var-log") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-log". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:31:47 crc kubenswrapper[4875]: I1126 00:31:47.827310 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock" (OuterVolumeSpecName: "var-lock") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "var-lock". 
PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:31:47 crc kubenswrapper[4875]: I1126 00:31:47.827705 4875 reconciler_common.go:293] "Volume detached for volume \"manifests\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-manifests\") on node \"crc\" DevicePath \"\"" Nov 26 00:31:47 crc kubenswrapper[4875]: I1126 00:31:47.827717 4875 reconciler_common.go:293] "Volume detached for volume \"var-log\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-log\") on node \"crc\" DevicePath \"\"" Nov 26 00:31:47 crc kubenswrapper[4875]: I1126 00:31:47.827726 4875 reconciler_common.go:293] "Volume detached for volume \"var-lock\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-var-lock\") on node \"crc\" DevicePath \"\"" Nov 26 00:31:47 crc kubenswrapper[4875]: I1126 00:31:47.827734 4875 reconciler_common.go:293] "Volume detached for volume \"resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 26 00:31:47 crc kubenswrapper[4875]: I1126 00:31:47.834735 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir" (OuterVolumeSpecName: "pod-resource-dir") pod "f85e55b1a89d02b0cb034b1ea31ed45a" (UID: "f85e55b1a89d02b0cb034b1ea31ed45a"). InnerVolumeSpecName "pod-resource-dir". PluginName "kubernetes.io/host-path", VolumeGidValue "" Nov 26 00:31:47 crc kubenswrapper[4875]: I1126 00:31:47.928988 4875 reconciler_common.go:293] "Volume detached for volume \"pod-resource-dir\" (UniqueName: \"kubernetes.io/host-path/f85e55b1a89d02b0cb034b1ea31ed45a-pod-resource-dir\") on node \"crc\" DevicePath \"\"" Nov 26 00:31:47 crc kubenswrapper[4875]: I1126 00:31:47.980874 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-kube-apiserver_kube-apiserver-startup-monitor-crc_f85e55b1a89d02b0cb034b1ea31ed45a/startup-monitor/0.log" Nov 26 00:31:47 crc kubenswrapper[4875]: I1126 00:31:47.981161 4875 generic.go:334] "Generic (PLEG): container finished" podID="f85e55b1a89d02b0cb034b1ea31ed45a" containerID="6579f652fc1a0ddc9835178f0d25afd6560cbfa0f63f67143a24f7628c16ee10" exitCode=137 Nov 26 00:31:47 crc kubenswrapper[4875]: I1126 00:31:47.981240 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" Nov 26 00:31:47 crc kubenswrapper[4875]: I1126 00:31:47.981247 4875 scope.go:117] "RemoveContainer" containerID="6579f652fc1a0ddc9835178f0d25afd6560cbfa0f63f67143a24f7628c16ee10" Nov 26 00:31:48 crc kubenswrapper[4875]: I1126 00:31:48.000748 4875 scope.go:117] "RemoveContainer" containerID="6579f652fc1a0ddc9835178f0d25afd6560cbfa0f63f67143a24f7628c16ee10" Nov 26 00:31:48 crc kubenswrapper[4875]: E1126 00:31:48.001568 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6579f652fc1a0ddc9835178f0d25afd6560cbfa0f63f67143a24f7628c16ee10\": container with ID starting with 6579f652fc1a0ddc9835178f0d25afd6560cbfa0f63f67143a24f7628c16ee10 not found: ID does not exist" containerID="6579f652fc1a0ddc9835178f0d25afd6560cbfa0f63f67143a24f7628c16ee10" Nov 26 00:31:48 crc kubenswrapper[4875]: I1126 00:31:48.001620 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6579f652fc1a0ddc9835178f0d25afd6560cbfa0f63f67143a24f7628c16ee10"} err="failed to get container status \"6579f652fc1a0ddc9835178f0d25afd6560cbfa0f63f67143a24f7628c16ee10\": rpc error: code = NotFound desc = could not find container \"6579f652fc1a0ddc9835178f0d25afd6560cbfa0f63f67143a24f7628c16ee10\": container with ID starting with 6579f652fc1a0ddc9835178f0d25afd6560cbfa0f63f67143a24f7628c16ee10 not found: ID does not exist" Nov 26 00:31:49 crc kubenswrapper[4875]: I1126 00:31:49.537606 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" path="/var/lib/kubelet/pods/f85e55b1a89d02b0cb034b1ea31ed45a/volumes" Nov 26 00:31:49 crc kubenswrapper[4875]: I1126 00:31:49.537919 4875 mirror_client.go:130] "Deleting a mirror pod" pod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" podUID="" Nov 26 00:31:49 crc kubenswrapper[4875]: I1126 00:31:49.547039 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 26 00:31:49 crc kubenswrapper[4875]: I1126 00:31:49.547073 4875 kubelet.go:2649] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="a17a61e7-d3d5-4fc8-acef-c2c6a95aab42" Nov 26 00:31:49 crc kubenswrapper[4875]: I1126 00:31:49.552310 4875 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-kube-apiserver/kube-apiserver-startup-monitor-crc"] Nov 26 00:31:49 crc kubenswrapper[4875]: I1126 00:31:49.552354 4875 kubelet.go:2673] "Unable to find pod for mirror pod, skipping" mirrorPod="openshift-kube-apiserver/kube-apiserver-startup-monitor-crc" mirrorPodUID="a17a61e7-d3d5-4fc8-acef-c2c6a95aab42" Nov 26 00:31:52 crc kubenswrapper[4875]: I1126 00:31:52.026749 4875 prober.go:107] "Probe failed" probeType="Liveness" pod="cert-manager/cert-manager-86cb77c54b-6mr68" podUID="20d9eaa7-e48a-4f9e-aa76-f7fe8494e270" containerName="cert-manager-controller" probeResult="failure" output="Get \"http://10.217.0.67:9403/livez\": dial tcp 10.217.0.67:9403: connect: connection refused" Nov 26 00:31:53 crc kubenswrapper[4875]: I1126 00:31:53.017474 4875 generic.go:334] "Generic (PLEG): container finished" podID="20d9eaa7-e48a-4f9e-aa76-f7fe8494e270" containerID="d27ec70223854359541ce1e9264aa45738e5894a0e130a1de8d3353cfbfd9120" exitCode=1 Nov 26 00:31:53 crc kubenswrapper[4875]: I1126 00:31:53.017544 4875 kubelet.go:2453] "SyncLoop 
(PLEG): event for pod" pod="cert-manager/cert-manager-86cb77c54b-6mr68" event={"ID":"20d9eaa7-e48a-4f9e-aa76-f7fe8494e270","Type":"ContainerDied","Data":"d27ec70223854359541ce1e9264aa45738e5894a0e130a1de8d3353cfbfd9120"} Nov 26 00:31:53 crc kubenswrapper[4875]: I1126 00:31:53.018449 4875 scope.go:117] "RemoveContainer" containerID="d27ec70223854359541ce1e9264aa45738e5894a0e130a1de8d3353cfbfd9120" Nov 26 00:31:53 crc kubenswrapper[4875]: I1126 00:31:53.529271 4875 scope.go:117] "RemoveContainer" containerID="37ee2fb3c4f9587c099434441f8a051c73d91c5cbcfda08bce7fe55cba41b95d" Nov 26 00:31:54 crc kubenswrapper[4875]: I1126 00:31:54.025243 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-86cb77c54b-6mr68" event={"ID":"20d9eaa7-e48a-4f9e-aa76-f7fe8494e270","Type":"ContainerStarted","Data":"7ebb5ab9f465dc55484b6a38e559cc62333e3fa7c33ce99ae1bebc97222e1aa0"} Nov 26 00:31:54 crc kubenswrapper[4875]: I1126 00:31:54.027336 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="service-telemetry/elastic-operator-bf7b77755-xjqrk" event={"ID":"5fdc5235-fab2-4d96-8526-1ca2270a92ac","Type":"ContainerStarted","Data":"8c58144d3b2a84c8cb6f91b9a7a1f6fd42fb5350c4b5e69178260a53c81a5fae"} Nov 26 00:31:54 crc kubenswrapper[4875]: I1126 00:31:54.529500 4875 scope.go:117] "RemoveContainer" containerID="cbf747232edb00a8c6aa9f672d16df0627c45059328c570fc29a9cdb3453aac5" Nov 26 00:31:55 crc kubenswrapper[4875]: I1126 00:31:55.034596 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="cert-manager/cert-manager-cainjector-855d9ccff4-tcxqx" event={"ID":"2bf438ed-b485-429e-ab20-c5f354a3ab25","Type":"ContainerStarted","Data":"7fd87d549d97957eed5fb3a612d485d36ff42bd1e9101eddcb604fca12ec8f8d"} Nov 26 00:32:03 crc kubenswrapper[4875]: I1126 00:32:03.083108 4875 generic.go:334] "Generic (PLEG): container finished" podID="ac9cfb7a-c7d1-45ae-8ce1-7613fc318ec1" containerID="7674e5fccd4aceddc7997075f6ead866729f1b3c0f825a5e89cd507d655443e9" exitCode=0 Nov 26 00:32:03 crc kubenswrapper[4875]: I1126 00:32:03.083182 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-bbdxg" event={"ID":"ac9cfb7a-c7d1-45ae-8ce1-7613fc318ec1","Type":"ContainerDied","Data":"7674e5fccd4aceddc7997075f6ead866729f1b3c0f825a5e89cd507d655443e9"} Nov 26 00:32:03 crc kubenswrapper[4875]: I1126 00:32:03.083913 4875 scope.go:117] "RemoveContainer" containerID="7674e5fccd4aceddc7997075f6ead866729f1b3c0f825a5e89cd507d655443e9" Nov 26 00:32:04 crc kubenswrapper[4875]: I1126 00:32:04.094963 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/marketplace-operator-79b997595-bbdxg" event={"ID":"ac9cfb7a-c7d1-45ae-8ce1-7613fc318ec1","Type":"ContainerStarted","Data":"63c23618dac40188becddd94d363b31aaae32259a1f7004854986b75b3afb4d9"} Nov 26 00:32:04 crc kubenswrapper[4875]: I1126 00:32:04.095733 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/marketplace-operator-79b997595-bbdxg" Nov 26 00:32:04 crc kubenswrapper[4875]: I1126 00:32:04.097106 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/marketplace-operator-79b997595-bbdxg" Nov 26 00:32:07 crc kubenswrapper[4875]: I1126 00:32:07.512493 4875 patch_prober.go:28] interesting pod/machine-config-daemon-9mztb container/machine-config-daemon namespace/openshift-machine-config-operator: Liveness probe status=failure output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: 
connection refused" start-of-body= Nov 26 00:32:07 crc kubenswrapper[4875]: I1126 00:32:07.512865 4875 prober.go:107] "Probe failed" probeType="Liveness" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" containerName="machine-config-daemon" probeResult="failure" output="Get \"http://127.0.0.1:8798/health\": dial tcp 127.0.0.1:8798: connect: connection refused" Nov 26 00:32:07 crc kubenswrapper[4875]: I1126 00:32:07.512909 4875 kubelet.go:2542] "SyncLoop (probe)" probe="liveness" status="unhealthy" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" Nov 26 00:32:07 crc kubenswrapper[4875]: I1126 00:32:07.513477 4875 kuberuntime_manager.go:1027] "Message for Container of pod" containerName="machine-config-daemon" containerStatusID={"Type":"cri-o","ID":"5d035f080764919b1a1cc8b64c0f43d53b6b36ff26c916f5425dc21761337921"} pod="openshift-machine-config-operator/machine-config-daemon-9mztb" containerMessage="Container machine-config-daemon failed liveness probe, will be restarted" Nov 26 00:32:07 crc kubenswrapper[4875]: I1126 00:32:07.513539 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" containerName="machine-config-daemon" containerID="cri-o://5d035f080764919b1a1cc8b64c0f43d53b6b36ff26c916f5425dc21761337921" gracePeriod=600 Nov 26 00:32:07 crc kubenswrapper[4875]: E1126 00:32:07.631783 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9mztb_openshift-machine-config-operator(0d4fb5a7-3ade-467a-9640-d744b1ef0c8e)\"" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" Nov 26 00:32:08 crc kubenswrapper[4875]: I1126 00:32:08.088943 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-must-gather-55lm5/must-gather-kdgps"] Nov 26 00:32:08 crc kubenswrapper[4875]: E1126 00:32:08.089611 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="650295c4-b8e4-4036-8245-441b299b5aac" containerName="git-clone" Nov 26 00:32:08 crc kubenswrapper[4875]: I1126 00:32:08.089638 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="650295c4-b8e4-4036-8245-441b299b5aac" containerName="git-clone" Nov 26 00:32:08 crc kubenswrapper[4875]: E1126 00:32:08.089652 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="650295c4-b8e4-4036-8245-441b299b5aac" containerName="manage-dockerfile" Nov 26 00:32:08 crc kubenswrapper[4875]: I1126 00:32:08.089660 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="650295c4-b8e4-4036-8245-441b299b5aac" containerName="manage-dockerfile" Nov 26 00:32:08 crc kubenswrapper[4875]: E1126 00:32:08.089678 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c" containerName="installer" Nov 26 00:32:08 crc kubenswrapper[4875]: I1126 00:32:08.089685 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c" containerName="installer" Nov 26 00:32:08 crc kubenswrapper[4875]: E1126 00:32:08.089698 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 26 00:32:08 crc kubenswrapper[4875]: I1126 
00:32:08.089705 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 26 00:32:08 crc kubenswrapper[4875]: E1126 00:32:08.089719 4875 cpu_manager.go:410] "RemoveStaleState: removing container" podUID="650295c4-b8e4-4036-8245-441b299b5aac" containerName="docker-build" Nov 26 00:32:08 crc kubenswrapper[4875]: I1126 00:32:08.089726 4875 state_mem.go:107] "Deleted CPUSet assignment" podUID="650295c4-b8e4-4036-8245-441b299b5aac" containerName="docker-build" Nov 26 00:32:08 crc kubenswrapper[4875]: I1126 00:32:08.089847 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="650295c4-b8e4-4036-8245-441b299b5aac" containerName="docker-build" Nov 26 00:32:08 crc kubenswrapper[4875]: I1126 00:32:08.089860 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="f85e55b1a89d02b0cb034b1ea31ed45a" containerName="startup-monitor" Nov 26 00:32:08 crc kubenswrapper[4875]: I1126 00:32:08.089876 4875 memory_manager.go:354] "RemoveStaleState removing state" podUID="4b9cbfca-aab6-4ba7-ae80-af21d5aaf76c" containerName="installer" Nov 26 00:32:08 crc kubenswrapper[4875]: I1126 00:32:08.090696 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-must-gather-55lm5/must-gather-kdgps" Nov 26 00:32:08 crc kubenswrapper[4875]: I1126 00:32:08.091512 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-55lm5/must-gather-kdgps"] Nov 26 00:32:08 crc kubenswrapper[4875]: I1126 00:32:08.093051 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-55lm5"/"kube-root-ca.crt" Nov 26 00:32:08 crc kubenswrapper[4875]: I1126 00:32:08.093269 4875 reflector.go:368] Caches populated for *v1.ConfigMap from object-"openshift-must-gather-55lm5"/"openshift-service-ca.crt" Nov 26 00:32:08 crc kubenswrapper[4875]: I1126 00:32:08.120364 4875 generic.go:334] "Generic (PLEG): container finished" podID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" containerID="5d035f080764919b1a1cc8b64c0f43d53b6b36ff26c916f5425dc21761337921" exitCode=0 Nov 26 00:32:08 crc kubenswrapper[4875]: I1126 00:32:08.120435 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" event={"ID":"0d4fb5a7-3ade-467a-9640-d744b1ef0c8e","Type":"ContainerDied","Data":"5d035f080764919b1a1cc8b64c0f43d53b6b36ff26c916f5425dc21761337921"} Nov 26 00:32:08 crc kubenswrapper[4875]: I1126 00:32:08.120481 4875 scope.go:117] "RemoveContainer" containerID="9d7963837af3c665aadff4c3b7d21b20d77252b8e2a08db4f64c8184c28bc3b4" Nov 26 00:32:08 crc kubenswrapper[4875]: I1126 00:32:08.121034 4875 scope.go:117] "RemoveContainer" containerID="5d035f080764919b1a1cc8b64c0f43d53b6b36ff26c916f5425dc21761337921" Nov 26 00:32:08 crc kubenswrapper[4875]: E1126 00:32:08.121310 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9mztb_openshift-machine-config-operator(0d4fb5a7-3ade-467a-9640-d744b1ef0c8e)\"" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" Nov 26 00:32:08 crc kubenswrapper[4875]: I1126 00:32:08.135403 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"must-gather-output\" (UniqueName: 
\"kubernetes.io/empty-dir/de1b9642-5b3f-490f-a7bd-adf4247f7395-must-gather-output\") pod \"must-gather-kdgps\" (UID: \"de1b9642-5b3f-490f-a7bd-adf4247f7395\") " pod="openshift-must-gather-55lm5/must-gather-kdgps" Nov 26 00:32:08 crc kubenswrapper[4875]: I1126 00:32:08.135485 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-w67gx\" (UniqueName: \"kubernetes.io/projected/de1b9642-5b3f-490f-a7bd-adf4247f7395-kube-api-access-w67gx\") pod \"must-gather-kdgps\" (UID: \"de1b9642-5b3f-490f-a7bd-adf4247f7395\") " pod="openshift-must-gather-55lm5/must-gather-kdgps" Nov 26 00:32:08 crc kubenswrapper[4875]: I1126 00:32:08.236456 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-w67gx\" (UniqueName: \"kubernetes.io/projected/de1b9642-5b3f-490f-a7bd-adf4247f7395-kube-api-access-w67gx\") pod \"must-gather-kdgps\" (UID: \"de1b9642-5b3f-490f-a7bd-adf4247f7395\") " pod="openshift-must-gather-55lm5/must-gather-kdgps" Nov 26 00:32:08 crc kubenswrapper[4875]: I1126 00:32:08.236606 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/de1b9642-5b3f-490f-a7bd-adf4247f7395-must-gather-output\") pod \"must-gather-kdgps\" (UID: \"de1b9642-5b3f-490f-a7bd-adf4247f7395\") " pod="openshift-must-gather-55lm5/must-gather-kdgps" Nov 26 00:32:08 crc kubenswrapper[4875]: I1126 00:32:08.237031 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/de1b9642-5b3f-490f-a7bd-adf4247f7395-must-gather-output\") pod \"must-gather-kdgps\" (UID: \"de1b9642-5b3f-490f-a7bd-adf4247f7395\") " pod="openshift-must-gather-55lm5/must-gather-kdgps" Nov 26 00:32:08 crc kubenswrapper[4875]: I1126 00:32:08.260000 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-w67gx\" (UniqueName: \"kubernetes.io/projected/de1b9642-5b3f-490f-a7bd-adf4247f7395-kube-api-access-w67gx\") pod \"must-gather-kdgps\" (UID: \"de1b9642-5b3f-490f-a7bd-adf4247f7395\") " pod="openshift-must-gather-55lm5/must-gather-kdgps" Nov 26 00:32:08 crc kubenswrapper[4875]: I1126 00:32:08.411505 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-55lm5/must-gather-kdgps" Nov 26 00:32:08 crc kubenswrapper[4875]: I1126 00:32:08.584209 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-must-gather-55lm5/must-gather-kdgps"] Nov 26 00:32:09 crc kubenswrapper[4875]: I1126 00:32:09.129874 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-55lm5/must-gather-kdgps" event={"ID":"de1b9642-5b3f-490f-a7bd-adf4247f7395","Type":"ContainerStarted","Data":"4d62e6f3a454d5fff15e21fe10b195a23097fc1be2111dafb365a49415dfab4b"} Nov 26 00:32:14 crc kubenswrapper[4875]: I1126 00:32:14.166422 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-55lm5/must-gather-kdgps" event={"ID":"de1b9642-5b3f-490f-a7bd-adf4247f7395","Type":"ContainerStarted","Data":"0d321cc2dd03632f28e804867ab96d44be80d70572c982411c611c2f4e34c276"} Nov 26 00:32:15 crc kubenswrapper[4875]: I1126 00:32:15.174342 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-55lm5/must-gather-kdgps" event={"ID":"de1b9642-5b3f-490f-a7bd-adf4247f7395","Type":"ContainerStarted","Data":"7735f0ce65cc2d8af73b5384418079ea22254feb4de096bc188f3cd5623f239a"} Nov 26 00:32:16 crc kubenswrapper[4875]: E1126 00:32:16.372502 4875 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d4fb5a7_3ade_467a_9640_d744b1ef0c8e.slice/crio-conmon-5d035f080764919b1a1cc8b64c0f43d53b6b36ff26c916f5425dc21761337921.scope\": RecentStats: unable to find data in memory cache]" Nov 26 00:32:18 crc kubenswrapper[4875]: I1126 00:32:18.528893 4875 scope.go:117] "RemoveContainer" containerID="5d035f080764919b1a1cc8b64c0f43d53b6b36ff26c916f5425dc21761337921" Nov 26 00:32:18 crc kubenswrapper[4875]: E1126 00:32:18.529533 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9mztb_openshift-machine-config-operator(0d4fb5a7-3ade-467a-9640-d744b1ef0c8e)\"" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" Nov 26 00:32:21 crc kubenswrapper[4875]: I1126 00:32:21.023030 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-must-gather-55lm5/must-gather-kdgps" podStartSLOduration=7.64614915 podStartE2EDuration="13.023012191s" podCreationTimestamp="2025-11-26 00:32:08 +0000 UTC" firstStartedPulling="2025-11-26 00:32:08.591142551 +0000 UTC m=+1501.781118246" lastFinishedPulling="2025-11-26 00:32:13.968005592 +0000 UTC m=+1507.157981287" observedRunningTime="2025-11-26 00:32:15.189034481 +0000 UTC m=+1508.379010176" watchObservedRunningTime="2025-11-26 00:32:21.023012191 +0000 UTC m=+1514.212987886" Nov 26 00:32:21 crc kubenswrapper[4875]: I1126 00:32:21.025575 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-647tp"] Nov 26 00:32:21 crc kubenswrapper[4875]: I1126 00:32:21.026782 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-647tp" Nov 26 00:32:21 crc kubenswrapper[4875]: I1126 00:32:21.033456 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-647tp"] Nov 26 00:32:21 crc kubenswrapper[4875]: I1126 00:32:21.200854 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-6tvwk\" (UniqueName: \"kubernetes.io/projected/ff970fac-2516-42c4-bdab-6be6f2985fb9-kube-api-access-6tvwk\") pod \"community-operators-647tp\" (UID: \"ff970fac-2516-42c4-bdab-6be6f2985fb9\") " pod="openshift-marketplace/community-operators-647tp" Nov 26 00:32:21 crc kubenswrapper[4875]: I1126 00:32:21.200945 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff970fac-2516-42c4-bdab-6be6f2985fb9-catalog-content\") pod \"community-operators-647tp\" (UID: \"ff970fac-2516-42c4-bdab-6be6f2985fb9\") " pod="openshift-marketplace/community-operators-647tp" Nov 26 00:32:21 crc kubenswrapper[4875]: I1126 00:32:21.201003 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff970fac-2516-42c4-bdab-6be6f2985fb9-utilities\") pod \"community-operators-647tp\" (UID: \"ff970fac-2516-42c4-bdab-6be6f2985fb9\") " pod="openshift-marketplace/community-operators-647tp" Nov 26 00:32:21 crc kubenswrapper[4875]: I1126 00:32:21.302245 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff970fac-2516-42c4-bdab-6be6f2985fb9-utilities\") pod \"community-operators-647tp\" (UID: \"ff970fac-2516-42c4-bdab-6be6f2985fb9\") " pod="openshift-marketplace/community-operators-647tp" Nov 26 00:32:21 crc kubenswrapper[4875]: I1126 00:32:21.302310 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-6tvwk\" (UniqueName: \"kubernetes.io/projected/ff970fac-2516-42c4-bdab-6be6f2985fb9-kube-api-access-6tvwk\") pod \"community-operators-647tp\" (UID: \"ff970fac-2516-42c4-bdab-6be6f2985fb9\") " pod="openshift-marketplace/community-operators-647tp" Nov 26 00:32:21 crc kubenswrapper[4875]: I1126 00:32:21.302338 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff970fac-2516-42c4-bdab-6be6f2985fb9-catalog-content\") pod \"community-operators-647tp\" (UID: \"ff970fac-2516-42c4-bdab-6be6f2985fb9\") " pod="openshift-marketplace/community-operators-647tp" Nov 26 00:32:21 crc kubenswrapper[4875]: I1126 00:32:21.302807 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff970fac-2516-42c4-bdab-6be6f2985fb9-utilities\") pod \"community-operators-647tp\" (UID: \"ff970fac-2516-42c4-bdab-6be6f2985fb9\") " pod="openshift-marketplace/community-operators-647tp" Nov 26 00:32:21 crc kubenswrapper[4875]: I1126 00:32:21.302852 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff970fac-2516-42c4-bdab-6be6f2985fb9-catalog-content\") pod \"community-operators-647tp\" (UID: \"ff970fac-2516-42c4-bdab-6be6f2985fb9\") " pod="openshift-marketplace/community-operators-647tp" Nov 26 00:32:21 crc kubenswrapper[4875]: I1126 00:32:21.326511 4875 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-6tvwk\" (UniqueName: \"kubernetes.io/projected/ff970fac-2516-42c4-bdab-6be6f2985fb9-kube-api-access-6tvwk\") pod \"community-operators-647tp\" (UID: \"ff970fac-2516-42c4-bdab-6be6f2985fb9\") " pod="openshift-marketplace/community-operators-647tp" Nov 26 00:32:21 crc kubenswrapper[4875]: I1126 00:32:21.346958 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-647tp" Nov 26 00:32:21 crc kubenswrapper[4875]: I1126 00:32:21.590309 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-647tp"] Nov 26 00:32:22 crc kubenswrapper[4875]: I1126 00:32:22.215835 4875 generic.go:334] "Generic (PLEG): container finished" podID="ff970fac-2516-42c4-bdab-6be6f2985fb9" containerID="82ed9cd20db03cee5d5c2e34855398a3339118555f1fdf656329b9aafb84989b" exitCode=0 Nov 26 00:32:22 crc kubenswrapper[4875]: I1126 00:32:22.215921 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-647tp" event={"ID":"ff970fac-2516-42c4-bdab-6be6f2985fb9","Type":"ContainerDied","Data":"82ed9cd20db03cee5d5c2e34855398a3339118555f1fdf656329b9aafb84989b"} Nov 26 00:32:22 crc kubenswrapper[4875]: I1126 00:32:22.216194 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-647tp" event={"ID":"ff970fac-2516-42c4-bdab-6be6f2985fb9","Type":"ContainerStarted","Data":"ad60d88b14eef34e6cd0db78ab2c52643af1f94c66dd41cb0f361a7e829b2299"} Nov 26 00:32:24 crc kubenswrapper[4875]: I1126 00:32:24.229103 4875 generic.go:334] "Generic (PLEG): container finished" podID="ff970fac-2516-42c4-bdab-6be6f2985fb9" containerID="6ac51d253ab237630b4c2d2924544dec85883e2c53f1b7d26c1ef13e6c5558b1" exitCode=0 Nov 26 00:32:24 crc kubenswrapper[4875]: I1126 00:32:24.229228 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-647tp" event={"ID":"ff970fac-2516-42c4-bdab-6be6f2985fb9","Type":"ContainerDied","Data":"6ac51d253ab237630b4c2d2924544dec85883e2c53f1b7d26c1ef13e6c5558b1"} Nov 26 00:32:26 crc kubenswrapper[4875]: I1126 00:32:26.246888 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-647tp" event={"ID":"ff970fac-2516-42c4-bdab-6be6f2985fb9","Type":"ContainerStarted","Data":"fe5769bc2f1ce6e341f28d96443de51cacd30c41a8a6a8b94891d10a169845a3"} Nov 26 00:32:26 crc kubenswrapper[4875]: I1126 00:32:26.266442 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-647tp" podStartSLOduration=2.241369895 podStartE2EDuration="5.266393143s" podCreationTimestamp="2025-11-26 00:32:21 +0000 UTC" firstStartedPulling="2025-11-26 00:32:22.217841147 +0000 UTC m=+1515.407816842" lastFinishedPulling="2025-11-26 00:32:25.242864395 +0000 UTC m=+1518.432840090" observedRunningTime="2025-11-26 00:32:26.263040383 +0000 UTC m=+1519.453016078" watchObservedRunningTime="2025-11-26 00:32:26.266393143 +0000 UTC m=+1519.456368838" Nov 26 00:32:26 crc kubenswrapper[4875]: E1126 00:32:26.510044 4875 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d4fb5a7_3ade_467a_9640_d744b1ef0c8e.slice/crio-conmon-5d035f080764919b1a1cc8b64c0f43d53b6b36ff26c916f5425dc21761337921.scope\": RecentStats: unable to find data in memory cache]" Nov 26 00:32:30 crc 
kubenswrapper[4875]: I1126 00:32:30.529273 4875 scope.go:117] "RemoveContainer" containerID="5d035f080764919b1a1cc8b64c0f43d53b6b36ff26c916f5425dc21761337921" Nov 26 00:32:30 crc kubenswrapper[4875]: E1126 00:32:30.529835 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9mztb_openshift-machine-config-operator(0d4fb5a7-3ade-467a-9640-d744b1ef0c8e)\"" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" Nov 26 00:32:31 crc kubenswrapper[4875]: I1126 00:32:31.347569 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-647tp" Nov 26 00:32:31 crc kubenswrapper[4875]: I1126 00:32:31.347629 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-647tp" Nov 26 00:32:31 crc kubenswrapper[4875]: I1126 00:32:31.406175 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-647tp" Nov 26 00:32:32 crc kubenswrapper[4875]: I1126 00:32:32.328855 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-647tp" Nov 26 00:32:36 crc kubenswrapper[4875]: E1126 00:32:36.660114 4875 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d4fb5a7_3ade_467a_9640_d744b1ef0c8e.slice/crio-conmon-5d035f080764919b1a1cc8b64c0f43d53b6b36ff26c916f5425dc21761337921.scope\": RecentStats: unable to find data in memory cache]" Nov 26 00:32:43 crc kubenswrapper[4875]: I1126 00:32:43.531948 4875 scope.go:117] "RemoveContainer" containerID="5d035f080764919b1a1cc8b64c0f43d53b6b36ff26c916f5425dc21761337921" Nov 26 00:32:43 crc kubenswrapper[4875]: E1126 00:32:43.532722 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9mztb_openshift-machine-config-operator(0d4fb5a7-3ade-467a-9640-d744b1ef0c8e)\"" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" Nov 26 00:32:46 crc kubenswrapper[4875]: E1126 00:32:46.795249 4875 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d4fb5a7_3ade_467a_9640_d744b1ef0c8e.slice/crio-conmon-5d035f080764919b1a1cc8b64c0f43d53b6b36ff26c916f5425dc21761337921.scope\": RecentStats: unable to find data in memory cache]" Nov 26 00:32:51 crc kubenswrapper[4875]: I1126 00:32:51.059330 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-ntrhj"] Nov 26 00:32:51 crc kubenswrapper[4875]: I1126 00:32:51.061663 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-ntrhj" Nov 26 00:32:51 crc kubenswrapper[4875]: I1126 00:32:51.069984 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-ntrhj"] Nov 26 00:32:51 crc kubenswrapper[4875]: I1126 00:32:51.224292 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-lbgbm\" (UniqueName: \"kubernetes.io/projected/fa43ec61-012b-42c0-bcc7-26a36a0db093-kube-api-access-lbgbm\") pod \"community-operators-ntrhj\" (UID: \"fa43ec61-012b-42c0-bcc7-26a36a0db093\") " pod="openshift-marketplace/community-operators-ntrhj" Nov 26 00:32:51 crc kubenswrapper[4875]: I1126 00:32:51.224352 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa43ec61-012b-42c0-bcc7-26a36a0db093-utilities\") pod \"community-operators-ntrhj\" (UID: \"fa43ec61-012b-42c0-bcc7-26a36a0db093\") " pod="openshift-marketplace/community-operators-ntrhj" Nov 26 00:32:51 crc kubenswrapper[4875]: I1126 00:32:51.224385 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa43ec61-012b-42c0-bcc7-26a36a0db093-catalog-content\") pod \"community-operators-ntrhj\" (UID: \"fa43ec61-012b-42c0-bcc7-26a36a0db093\") " pod="openshift-marketplace/community-operators-ntrhj" Nov 26 00:32:51 crc kubenswrapper[4875]: I1126 00:32:51.325308 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa43ec61-012b-42c0-bcc7-26a36a0db093-catalog-content\") pod \"community-operators-ntrhj\" (UID: \"fa43ec61-012b-42c0-bcc7-26a36a0db093\") " pod="openshift-marketplace/community-operators-ntrhj" Nov 26 00:32:51 crc kubenswrapper[4875]: I1126 00:32:51.325423 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-lbgbm\" (UniqueName: \"kubernetes.io/projected/fa43ec61-012b-42c0-bcc7-26a36a0db093-kube-api-access-lbgbm\") pod \"community-operators-ntrhj\" (UID: \"fa43ec61-012b-42c0-bcc7-26a36a0db093\") " pod="openshift-marketplace/community-operators-ntrhj" Nov 26 00:32:51 crc kubenswrapper[4875]: I1126 00:32:51.325463 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa43ec61-012b-42c0-bcc7-26a36a0db093-utilities\") pod \"community-operators-ntrhj\" (UID: \"fa43ec61-012b-42c0-bcc7-26a36a0db093\") " pod="openshift-marketplace/community-operators-ntrhj" Nov 26 00:32:51 crc kubenswrapper[4875]: I1126 00:32:51.326146 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa43ec61-012b-42c0-bcc7-26a36a0db093-catalog-content\") pod \"community-operators-ntrhj\" (UID: \"fa43ec61-012b-42c0-bcc7-26a36a0db093\") " pod="openshift-marketplace/community-operators-ntrhj" Nov 26 00:32:51 crc kubenswrapper[4875]: I1126 00:32:51.326168 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa43ec61-012b-42c0-bcc7-26a36a0db093-utilities\") pod \"community-operators-ntrhj\" (UID: \"fa43ec61-012b-42c0-bcc7-26a36a0db093\") " pod="openshift-marketplace/community-operators-ntrhj" Nov 26 00:32:51 crc kubenswrapper[4875]: I1126 00:32:51.349779 4875 operation_generator.go:637] 
"MountVolume.SetUp succeeded for volume \"kube-api-access-lbgbm\" (UniqueName: \"kubernetes.io/projected/fa43ec61-012b-42c0-bcc7-26a36a0db093-kube-api-access-lbgbm\") pod \"community-operators-ntrhj\" (UID: \"fa43ec61-012b-42c0-bcc7-26a36a0db093\") " pod="openshift-marketplace/community-operators-ntrhj" Nov 26 00:32:51 crc kubenswrapper[4875]: I1126 00:32:51.390282 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ntrhj" Nov 26 00:32:51 crc kubenswrapper[4875]: I1126 00:32:51.434894 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-bc62x"] Nov 26 00:32:51 crc kubenswrapper[4875]: I1126 00:32:51.436159 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bc62x" Nov 26 00:32:51 crc kubenswrapper[4875]: I1126 00:32:51.454310 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bc62x"] Nov 26 00:32:51 crc kubenswrapper[4875]: I1126 00:32:51.630847 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-794hh\" (UniqueName: \"kubernetes.io/projected/756b3d84-948c-4ca2-ab16-8097c2274da4-kube-api-access-794hh\") pod \"community-operators-bc62x\" (UID: \"756b3d84-948c-4ca2-ab16-8097c2274da4\") " pod="openshift-marketplace/community-operators-bc62x" Nov 26 00:32:51 crc kubenswrapper[4875]: I1126 00:32:51.631191 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/756b3d84-948c-4ca2-ab16-8097c2274da4-utilities\") pod \"community-operators-bc62x\" (UID: \"756b3d84-948c-4ca2-ab16-8097c2274da4\") " pod="openshift-marketplace/community-operators-bc62x" Nov 26 00:32:51 crc kubenswrapper[4875]: I1126 00:32:51.631259 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/756b3d84-948c-4ca2-ab16-8097c2274da4-catalog-content\") pod \"community-operators-bc62x\" (UID: \"756b3d84-948c-4ca2-ab16-8097c2274da4\") " pod="openshift-marketplace/community-operators-bc62x" Nov 26 00:32:51 crc kubenswrapper[4875]: I1126 00:32:51.646580 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_control-plane-machine-set-operator-78cbb6b69f-rln4j_986802f7-9a9e-4191-8545-0ccda0c2bcf5/control-plane-machine-set-operator/0.log" Nov 26 00:32:51 crc kubenswrapper[4875]: I1126 00:32:51.672846 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-ntrhj"] Nov 26 00:32:51 crc kubenswrapper[4875]: I1126 00:32:51.732612 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/756b3d84-948c-4ca2-ab16-8097c2274da4-utilities\") pod \"community-operators-bc62x\" (UID: \"756b3d84-948c-4ca2-ab16-8097c2274da4\") " pod="openshift-marketplace/community-operators-bc62x" Nov 26 00:32:51 crc kubenswrapper[4875]: I1126 00:32:51.732716 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/756b3d84-948c-4ca2-ab16-8097c2274da4-catalog-content\") pod \"community-operators-bc62x\" (UID: \"756b3d84-948c-4ca2-ab16-8097c2274da4\") " pod="openshift-marketplace/community-operators-bc62x" Nov 26 00:32:51 crc 
kubenswrapper[4875]: I1126 00:32:51.732755 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-794hh\" (UniqueName: \"kubernetes.io/projected/756b3d84-948c-4ca2-ab16-8097c2274da4-kube-api-access-794hh\") pod \"community-operators-bc62x\" (UID: \"756b3d84-948c-4ca2-ab16-8097c2274da4\") " pod="openshift-marketplace/community-operators-bc62x" Nov 26 00:32:51 crc kubenswrapper[4875]: I1126 00:32:51.733498 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/756b3d84-948c-4ca2-ab16-8097c2274da4-utilities\") pod \"community-operators-bc62x\" (UID: \"756b3d84-948c-4ca2-ab16-8097c2274da4\") " pod="openshift-marketplace/community-operators-bc62x" Nov 26 00:32:51 crc kubenswrapper[4875]: I1126 00:32:51.733707 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/756b3d84-948c-4ca2-ab16-8097c2274da4-catalog-content\") pod \"community-operators-bc62x\" (UID: \"756b3d84-948c-4ca2-ab16-8097c2274da4\") " pod="openshift-marketplace/community-operators-bc62x" Nov 26 00:32:51 crc kubenswrapper[4875]: I1126 00:32:51.756609 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-794hh\" (UniqueName: \"kubernetes.io/projected/756b3d84-948c-4ca2-ab16-8097c2274da4-kube-api-access-794hh\") pod \"community-operators-bc62x\" (UID: \"756b3d84-948c-4ca2-ab16-8097c2274da4\") " pod="openshift-marketplace/community-operators-bc62x" Nov 26 00:32:51 crc kubenswrapper[4875]: I1126 00:32:51.765962 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bc62x" Nov 26 00:32:51 crc kubenswrapper[4875]: I1126 00:32:51.850992 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-pkmj5_d2c99872-a302-4599-96b7-2320051f069c/machine-api-operator/0.log" Nov 26 00:32:51 crc kubenswrapper[4875]: I1126 00:32:51.891915 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-machine-api_machine-api-operator-5694c8668f-pkmj5_d2c99872-a302-4599-96b7-2320051f069c/kube-rbac-proxy/0.log" Nov 26 00:32:52 crc kubenswrapper[4875]: I1126 00:32:52.028995 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-bc62x"] Nov 26 00:32:52 crc kubenswrapper[4875]: I1126 00:32:52.408834 4875 generic.go:334] "Generic (PLEG): container finished" podID="756b3d84-948c-4ca2-ab16-8097c2274da4" containerID="816b9f8199e4a2f162c48d1cc226c50d07d89355ce2d827743550d6f18332011" exitCode=0 Nov 26 00:32:52 crc kubenswrapper[4875]: I1126 00:32:52.408900 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bc62x" event={"ID":"756b3d84-948c-4ca2-ab16-8097c2274da4","Type":"ContainerDied","Data":"816b9f8199e4a2f162c48d1cc226c50d07d89355ce2d827743550d6f18332011"} Nov 26 00:32:52 crc kubenswrapper[4875]: I1126 00:32:52.408925 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bc62x" event={"ID":"756b3d84-948c-4ca2-ab16-8097c2274da4","Type":"ContainerStarted","Data":"133fba7a5f5563f78c64ea6b67c7a288e3639953b45c2b4f4aaf780886e93c58"} Nov 26 00:32:52 crc kubenswrapper[4875]: I1126 00:32:52.410385 4875 generic.go:334] "Generic (PLEG): container finished" podID="fa43ec61-012b-42c0-bcc7-26a36a0db093" 
containerID="16f27c2fd4e9a6fcd0f88579a1eca42b7bfa805a1e80b38c05fd0b76e02ecff9" exitCode=0 Nov 26 00:32:52 crc kubenswrapper[4875]: I1126 00:32:52.410423 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ntrhj" event={"ID":"fa43ec61-012b-42c0-bcc7-26a36a0db093","Type":"ContainerDied","Data":"16f27c2fd4e9a6fcd0f88579a1eca42b7bfa805a1e80b38c05fd0b76e02ecff9"} Nov 26 00:32:52 crc kubenswrapper[4875]: I1126 00:32:52.410440 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ntrhj" event={"ID":"fa43ec61-012b-42c0-bcc7-26a36a0db093","Type":"ContainerStarted","Data":"d77ebeedf37f32d6168d1a405f252022f3470c7b25fc685fda234188a05c0767"} Nov 26 00:32:52 crc kubenswrapper[4875]: I1126 00:32:52.636526 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-249dr"] Nov 26 00:32:52 crc kubenswrapper[4875]: I1126 00:32:52.638499 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-249dr" Nov 26 00:32:52 crc kubenswrapper[4875]: I1126 00:32:52.648206 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-249dr"] Nov 26 00:32:52 crc kubenswrapper[4875]: I1126 00:32:52.745188 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-c2ccp\" (UniqueName: \"kubernetes.io/projected/b2e91527-9faa-4e82-af09-21d90c108433-kube-api-access-c2ccp\") pod \"community-operators-249dr\" (UID: \"b2e91527-9faa-4e82-af09-21d90c108433\") " pod="openshift-marketplace/community-operators-249dr" Nov 26 00:32:52 crc kubenswrapper[4875]: I1126 00:32:52.745244 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b2e91527-9faa-4e82-af09-21d90c108433-utilities\") pod \"community-operators-249dr\" (UID: \"b2e91527-9faa-4e82-af09-21d90c108433\") " pod="openshift-marketplace/community-operators-249dr" Nov 26 00:32:52 crc kubenswrapper[4875]: I1126 00:32:52.745293 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b2e91527-9faa-4e82-af09-21d90c108433-catalog-content\") pod \"community-operators-249dr\" (UID: \"b2e91527-9faa-4e82-af09-21d90c108433\") " pod="openshift-marketplace/community-operators-249dr" Nov 26 00:32:52 crc kubenswrapper[4875]: I1126 00:32:52.847179 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-c2ccp\" (UniqueName: \"kubernetes.io/projected/b2e91527-9faa-4e82-af09-21d90c108433-kube-api-access-c2ccp\") pod \"community-operators-249dr\" (UID: \"b2e91527-9faa-4e82-af09-21d90c108433\") " pod="openshift-marketplace/community-operators-249dr" Nov 26 00:32:52 crc kubenswrapper[4875]: I1126 00:32:52.847241 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b2e91527-9faa-4e82-af09-21d90c108433-utilities\") pod \"community-operators-249dr\" (UID: \"b2e91527-9faa-4e82-af09-21d90c108433\") " pod="openshift-marketplace/community-operators-249dr" Nov 26 00:32:52 crc kubenswrapper[4875]: I1126 00:32:52.847306 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: 
\"kubernetes.io/empty-dir/b2e91527-9faa-4e82-af09-21d90c108433-catalog-content\") pod \"community-operators-249dr\" (UID: \"b2e91527-9faa-4e82-af09-21d90c108433\") " pod="openshift-marketplace/community-operators-249dr" Nov 26 00:32:52 crc kubenswrapper[4875]: I1126 00:32:52.847942 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b2e91527-9faa-4e82-af09-21d90c108433-utilities\") pod \"community-operators-249dr\" (UID: \"b2e91527-9faa-4e82-af09-21d90c108433\") " pod="openshift-marketplace/community-operators-249dr" Nov 26 00:32:52 crc kubenswrapper[4875]: I1126 00:32:52.847988 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b2e91527-9faa-4e82-af09-21d90c108433-catalog-content\") pod \"community-operators-249dr\" (UID: \"b2e91527-9faa-4e82-af09-21d90c108433\") " pod="openshift-marketplace/community-operators-249dr" Nov 26 00:32:52 crc kubenswrapper[4875]: I1126 00:32:52.881302 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-c2ccp\" (UniqueName: \"kubernetes.io/projected/b2e91527-9faa-4e82-af09-21d90c108433-kube-api-access-c2ccp\") pod \"community-operators-249dr\" (UID: \"b2e91527-9faa-4e82-af09-21d90c108433\") " pod="openshift-marketplace/community-operators-249dr" Nov 26 00:32:53 crc kubenswrapper[4875]: I1126 00:32:53.022389 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-249dr" Nov 26 00:32:53 crc kubenswrapper[4875]: I1126 00:32:53.275916 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-249dr"] Nov 26 00:32:53 crc kubenswrapper[4875]: I1126 00:32:53.417525 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ntrhj" event={"ID":"fa43ec61-012b-42c0-bcc7-26a36a0db093","Type":"ContainerStarted","Data":"061091b05b8e2f94d71e9487436ce5e3bd67fc7dcd7bfb116474a9e754373d8b"} Nov 26 00:32:53 crc kubenswrapper[4875]: I1126 00:32:53.421511 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-249dr" event={"ID":"b2e91527-9faa-4e82-af09-21d90c108433","Type":"ContainerStarted","Data":"9a69a18399ff09c0d6830e6e27ed73796d8fe52f3316057a87c619408b6d35ad"} Nov 26 00:32:54 crc kubenswrapper[4875]: I1126 00:32:54.435013 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-25nqc"] Nov 26 00:32:54 crc kubenswrapper[4875]: I1126 00:32:54.436160 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-25nqc" Nov 26 00:32:54 crc kubenswrapper[4875]: I1126 00:32:54.436191 4875 generic.go:334] "Generic (PLEG): container finished" podID="fa43ec61-012b-42c0-bcc7-26a36a0db093" containerID="061091b05b8e2f94d71e9487436ce5e3bd67fc7dcd7bfb116474a9e754373d8b" exitCode=0 Nov 26 00:32:54 crc kubenswrapper[4875]: I1126 00:32:54.436269 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ntrhj" event={"ID":"fa43ec61-012b-42c0-bcc7-26a36a0db093","Type":"ContainerDied","Data":"061091b05b8e2f94d71e9487436ce5e3bd67fc7dcd7bfb116474a9e754373d8b"} Nov 26 00:32:54 crc kubenswrapper[4875]: I1126 00:32:54.441481 4875 generic.go:334] "Generic (PLEG): container finished" podID="b2e91527-9faa-4e82-af09-21d90c108433" containerID="1117a8976de610b780f60f3de93cc6ab5f3accfcbf05e899ca736225df1f0c23" exitCode=0 Nov 26 00:32:54 crc kubenswrapper[4875]: I1126 00:32:54.441555 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-249dr" event={"ID":"b2e91527-9faa-4e82-af09-21d90c108433","Type":"ContainerDied","Data":"1117a8976de610b780f60f3de93cc6ab5f3accfcbf05e899ca736225df1f0c23"} Nov 26 00:32:54 crc kubenswrapper[4875]: I1126 00:32:54.445709 4875 generic.go:334] "Generic (PLEG): container finished" podID="756b3d84-948c-4ca2-ab16-8097c2274da4" containerID="2b3c37aaa085fd2049e5fa169483a513e863ef86a9d33edc7ff771839a56b1c5" exitCode=0 Nov 26 00:32:54 crc kubenswrapper[4875]: I1126 00:32:54.445749 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bc62x" event={"ID":"756b3d84-948c-4ca2-ab16-8097c2274da4","Type":"ContainerDied","Data":"2b3c37aaa085fd2049e5fa169483a513e863ef86a9d33edc7ff771839a56b1c5"} Nov 26 00:32:54 crc kubenswrapper[4875]: I1126 00:32:54.449557 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-25nqc"] Nov 26 00:32:54 crc kubenswrapper[4875]: I1126 00:32:54.569810 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc6d1620-c8a0-4c69-81db-8cf106f500d1-catalog-content\") pod \"community-operators-25nqc\" (UID: \"dc6d1620-c8a0-4c69-81db-8cf106f500d1\") " pod="openshift-marketplace/community-operators-25nqc" Nov 26 00:32:54 crc kubenswrapper[4875]: I1126 00:32:54.569869 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc6d1620-c8a0-4c69-81db-8cf106f500d1-utilities\") pod \"community-operators-25nqc\" (UID: \"dc6d1620-c8a0-4c69-81db-8cf106f500d1\") " pod="openshift-marketplace/community-operators-25nqc" Nov 26 00:32:54 crc kubenswrapper[4875]: I1126 00:32:54.569937 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-f76mt\" (UniqueName: \"kubernetes.io/projected/dc6d1620-c8a0-4c69-81db-8cf106f500d1-kube-api-access-f76mt\") pod \"community-operators-25nqc\" (UID: \"dc6d1620-c8a0-4c69-81db-8cf106f500d1\") " pod="openshift-marketplace/community-operators-25nqc" Nov 26 00:32:54 crc kubenswrapper[4875]: I1126 00:32:54.671066 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc6d1620-c8a0-4c69-81db-8cf106f500d1-catalog-content\") pod \"community-operators-25nqc\" (UID: 
\"dc6d1620-c8a0-4c69-81db-8cf106f500d1\") " pod="openshift-marketplace/community-operators-25nqc" Nov 26 00:32:54 crc kubenswrapper[4875]: I1126 00:32:54.671141 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc6d1620-c8a0-4c69-81db-8cf106f500d1-utilities\") pod \"community-operators-25nqc\" (UID: \"dc6d1620-c8a0-4c69-81db-8cf106f500d1\") " pod="openshift-marketplace/community-operators-25nqc" Nov 26 00:32:54 crc kubenswrapper[4875]: I1126 00:32:54.671202 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-f76mt\" (UniqueName: \"kubernetes.io/projected/dc6d1620-c8a0-4c69-81db-8cf106f500d1-kube-api-access-f76mt\") pod \"community-operators-25nqc\" (UID: \"dc6d1620-c8a0-4c69-81db-8cf106f500d1\") " pod="openshift-marketplace/community-operators-25nqc" Nov 26 00:32:54 crc kubenswrapper[4875]: I1126 00:32:54.671880 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc6d1620-c8a0-4c69-81db-8cf106f500d1-catalog-content\") pod \"community-operators-25nqc\" (UID: \"dc6d1620-c8a0-4c69-81db-8cf106f500d1\") " pod="openshift-marketplace/community-operators-25nqc" Nov 26 00:32:54 crc kubenswrapper[4875]: I1126 00:32:54.671887 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc6d1620-c8a0-4c69-81db-8cf106f500d1-utilities\") pod \"community-operators-25nqc\" (UID: \"dc6d1620-c8a0-4c69-81db-8cf106f500d1\") " pod="openshift-marketplace/community-operators-25nqc" Nov 26 00:32:54 crc kubenswrapper[4875]: I1126 00:32:54.690920 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-f76mt\" (UniqueName: \"kubernetes.io/projected/dc6d1620-c8a0-4c69-81db-8cf106f500d1-kube-api-access-f76mt\") pod \"community-operators-25nqc\" (UID: \"dc6d1620-c8a0-4c69-81db-8cf106f500d1\") " pod="openshift-marketplace/community-operators-25nqc" Nov 26 00:32:54 crc kubenswrapper[4875]: I1126 00:32:54.752388 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-25nqc" Nov 26 00:32:55 crc kubenswrapper[4875]: I1126 00:32:55.171445 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-25nqc"] Nov 26 00:32:55 crc kubenswrapper[4875]: W1126 00:32:55.174403 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-poddc6d1620_c8a0_4c69_81db_8cf106f500d1.slice/crio-932a4b34b7e2b7377c25bea02c344d88587aa03e13d62120cd7a906aa6b393ba WatchSource:0}: Error finding container 932a4b34b7e2b7377c25bea02c344d88587aa03e13d62120cd7a906aa6b393ba: Status 404 returned error can't find the container with id 932a4b34b7e2b7377c25bea02c344d88587aa03e13d62120cd7a906aa6b393ba Nov 26 00:32:55 crc kubenswrapper[4875]: I1126 00:32:55.453927 4875 generic.go:334] "Generic (PLEG): container finished" podID="b2e91527-9faa-4e82-af09-21d90c108433" containerID="95b705b1f2f1ab24d79d00d95d816c3b8eadc3e45c077c6c9278babe652e5b38" exitCode=0 Nov 26 00:32:55 crc kubenswrapper[4875]: I1126 00:32:55.453980 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-249dr" event={"ID":"b2e91527-9faa-4e82-af09-21d90c108433","Type":"ContainerDied","Data":"95b705b1f2f1ab24d79d00d95d816c3b8eadc3e45c077c6c9278babe652e5b38"} Nov 26 00:32:55 crc kubenswrapper[4875]: I1126 00:32:55.458282 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bc62x" event={"ID":"756b3d84-948c-4ca2-ab16-8097c2274da4","Type":"ContainerStarted","Data":"396f8833e25627ae79bb0654a558b54ede83fd08aadc8450e9f401f80e35d74d"} Nov 26 00:32:55 crc kubenswrapper[4875]: I1126 00:32:55.459786 4875 generic.go:334] "Generic (PLEG): container finished" podID="dc6d1620-c8a0-4c69-81db-8cf106f500d1" containerID="9c652313d5bfd3150bfdc4581e0691376e968cd60de5d6365cb9e358a6e4ac3f" exitCode=0 Nov 26 00:32:55 crc kubenswrapper[4875]: I1126 00:32:55.459837 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-25nqc" event={"ID":"dc6d1620-c8a0-4c69-81db-8cf106f500d1","Type":"ContainerDied","Data":"9c652313d5bfd3150bfdc4581e0691376e968cd60de5d6365cb9e358a6e4ac3f"} Nov 26 00:32:55 crc kubenswrapper[4875]: I1126 00:32:55.459895 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-25nqc" event={"ID":"dc6d1620-c8a0-4c69-81db-8cf106f500d1","Type":"ContainerStarted","Data":"932a4b34b7e2b7377c25bea02c344d88587aa03e13d62120cd7a906aa6b393ba"} Nov 26 00:32:55 crc kubenswrapper[4875]: I1126 00:32:55.465877 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ntrhj" event={"ID":"fa43ec61-012b-42c0-bcc7-26a36a0db093","Type":"ContainerStarted","Data":"877a2eb758667e339e9ae4b10bee379e58b62d99caaf237d8d3547112f22b4b4"} Nov 26 00:32:55 crc kubenswrapper[4875]: I1126 00:32:55.498878 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-ntrhj" podStartSLOduration=2.0769288870000002 podStartE2EDuration="4.498860077s" podCreationTimestamp="2025-11-26 00:32:51 +0000 UTC" firstStartedPulling="2025-11-26 00:32:52.411821303 +0000 UTC m=+1545.601796988" lastFinishedPulling="2025-11-26 00:32:54.833752483 +0000 UTC m=+1548.023728178" observedRunningTime="2025-11-26 00:32:55.497367686 +0000 UTC m=+1548.687343381" watchObservedRunningTime="2025-11-26 00:32:55.498860077 +0000 UTC 
m=+1548.688835772" Nov 26 00:32:55 crc kubenswrapper[4875]: I1126 00:32:55.544891 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-bc62x" podStartSLOduration=1.845438133 podStartE2EDuration="4.544875017s" podCreationTimestamp="2025-11-26 00:32:51 +0000 UTC" firstStartedPulling="2025-11-26 00:32:52.411209367 +0000 UTC m=+1545.601185062" lastFinishedPulling="2025-11-26 00:32:55.110646251 +0000 UTC m=+1548.300621946" observedRunningTime="2025-11-26 00:32:55.5405562 +0000 UTC m=+1548.730531895" watchObservedRunningTime="2025-11-26 00:32:55.544875017 +0000 UTC m=+1548.734850702" Nov 26 00:32:56 crc kubenswrapper[4875]: I1126 00:32:56.244611 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-2bmxq"] Nov 26 00:32:56 crc kubenswrapper[4875]: I1126 00:32:56.246117 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2bmxq" Nov 26 00:32:56 crc kubenswrapper[4875]: I1126 00:32:56.258178 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2bmxq"] Nov 26 00:32:56 crc kubenswrapper[4875]: I1126 00:32:56.394965 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bqbzm\" (UniqueName: \"kubernetes.io/projected/cf29d3d9-d456-4db2-b187-d3a5f1fa74d8-kube-api-access-bqbzm\") pod \"redhat-operators-2bmxq\" (UID: \"cf29d3d9-d456-4db2-b187-d3a5f1fa74d8\") " pod="openshift-marketplace/redhat-operators-2bmxq" Nov 26 00:32:56 crc kubenswrapper[4875]: I1126 00:32:56.395358 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cf29d3d9-d456-4db2-b187-d3a5f1fa74d8-catalog-content\") pod \"redhat-operators-2bmxq\" (UID: \"cf29d3d9-d456-4db2-b187-d3a5f1fa74d8\") " pod="openshift-marketplace/redhat-operators-2bmxq" Nov 26 00:32:56 crc kubenswrapper[4875]: I1126 00:32:56.395628 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cf29d3d9-d456-4db2-b187-d3a5f1fa74d8-utilities\") pod \"redhat-operators-2bmxq\" (UID: \"cf29d3d9-d456-4db2-b187-d3a5f1fa74d8\") " pod="openshift-marketplace/redhat-operators-2bmxq" Nov 26 00:32:56 crc kubenswrapper[4875]: I1126 00:32:56.475185 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-249dr" event={"ID":"b2e91527-9faa-4e82-af09-21d90c108433","Type":"ContainerStarted","Data":"0e81f7939fe409c993ded4b43b21f24ed8320e32492594dcd193e571f2c3d6e9"} Nov 26 00:32:56 crc kubenswrapper[4875]: I1126 00:32:56.497198 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bqbzm\" (UniqueName: \"kubernetes.io/projected/cf29d3d9-d456-4db2-b187-d3a5f1fa74d8-kube-api-access-bqbzm\") pod \"redhat-operators-2bmxq\" (UID: \"cf29d3d9-d456-4db2-b187-d3a5f1fa74d8\") " pod="openshift-marketplace/redhat-operators-2bmxq" Nov 26 00:32:56 crc kubenswrapper[4875]: I1126 00:32:56.497316 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cf29d3d9-d456-4db2-b187-d3a5f1fa74d8-catalog-content\") pod \"redhat-operators-2bmxq\" (UID: \"cf29d3d9-d456-4db2-b187-d3a5f1fa74d8\") " pod="openshift-marketplace/redhat-operators-2bmxq" Nov 26 00:32:56 crc 
kubenswrapper[4875]: I1126 00:32:56.497309 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-249dr" podStartSLOduration=3.077189211 podStartE2EDuration="4.497279572s" podCreationTimestamp="2025-11-26 00:32:52 +0000 UTC" firstStartedPulling="2025-11-26 00:32:54.443136912 +0000 UTC m=+1547.633112607" lastFinishedPulling="2025-11-26 00:32:55.863227273 +0000 UTC m=+1549.053202968" observedRunningTime="2025-11-26 00:32:56.494406864 +0000 UTC m=+1549.684382559" watchObservedRunningTime="2025-11-26 00:32:56.497279572 +0000 UTC m=+1549.687255267" Nov 26 00:32:56 crc kubenswrapper[4875]: I1126 00:32:56.497445 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cf29d3d9-d456-4db2-b187-d3a5f1fa74d8-utilities\") pod \"redhat-operators-2bmxq\" (UID: \"cf29d3d9-d456-4db2-b187-d3a5f1fa74d8\") " pod="openshift-marketplace/redhat-operators-2bmxq" Nov 26 00:32:56 crc kubenswrapper[4875]: I1126 00:32:56.498001 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cf29d3d9-d456-4db2-b187-d3a5f1fa74d8-utilities\") pod \"redhat-operators-2bmxq\" (UID: \"cf29d3d9-d456-4db2-b187-d3a5f1fa74d8\") " pod="openshift-marketplace/redhat-operators-2bmxq" Nov 26 00:32:56 crc kubenswrapper[4875]: I1126 00:32:56.497999 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cf29d3d9-d456-4db2-b187-d3a5f1fa74d8-catalog-content\") pod \"redhat-operators-2bmxq\" (UID: \"cf29d3d9-d456-4db2-b187-d3a5f1fa74d8\") " pod="openshift-marketplace/redhat-operators-2bmxq" Nov 26 00:32:56 crc kubenswrapper[4875]: I1126 00:32:56.519059 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bqbzm\" (UniqueName: \"kubernetes.io/projected/cf29d3d9-d456-4db2-b187-d3a5f1fa74d8-kube-api-access-bqbzm\") pod \"redhat-operators-2bmxq\" (UID: \"cf29d3d9-d456-4db2-b187-d3a5f1fa74d8\") " pod="openshift-marketplace/redhat-operators-2bmxq" Nov 26 00:32:56 crc kubenswrapper[4875]: I1126 00:32:56.528786 4875 scope.go:117] "RemoveContainer" containerID="5d035f080764919b1a1cc8b64c0f43d53b6b36ff26c916f5425dc21761337921" Nov 26 00:32:56 crc kubenswrapper[4875]: E1126 00:32:56.529018 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9mztb_openshift-machine-config-operator(0d4fb5a7-3ade-467a-9640-d744b1ef0c8e)\"" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" Nov 26 00:32:56 crc kubenswrapper[4875]: I1126 00:32:56.560410 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-2bmxq" Nov 26 00:32:56 crc kubenswrapper[4875]: E1126 00:32:56.952817 4875 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d4fb5a7_3ade_467a_9640_d744b1ef0c8e.slice/crio-conmon-5d035f080764919b1a1cc8b64c0f43d53b6b36ff26c916f5425dc21761337921.scope\": RecentStats: unable to find data in memory cache]" Nov 26 00:32:57 crc kubenswrapper[4875]: I1126 00:32:57.097875 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-2bmxq"] Nov 26 00:32:57 crc kubenswrapper[4875]: I1126 00:32:57.483368 4875 generic.go:334] "Generic (PLEG): container finished" podID="dc6d1620-c8a0-4c69-81db-8cf106f500d1" containerID="0aae320e023b52ed51f67f3701af36ebce6882d879d59052a686aae8a73ea69a" exitCode=0 Nov 26 00:32:57 crc kubenswrapper[4875]: I1126 00:32:57.483448 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-25nqc" event={"ID":"dc6d1620-c8a0-4c69-81db-8cf106f500d1","Type":"ContainerDied","Data":"0aae320e023b52ed51f67f3701af36ebce6882d879d59052a686aae8a73ea69a"} Nov 26 00:32:57 crc kubenswrapper[4875]: I1126 00:32:57.485958 4875 generic.go:334] "Generic (PLEG): container finished" podID="cf29d3d9-d456-4db2-b187-d3a5f1fa74d8" containerID="bfb90bfebcf662aa8640b0bbb46c703704bac4c196332c1a7d756ea9ebbb9cdf" exitCode=0 Nov 26 00:32:57 crc kubenswrapper[4875]: I1126 00:32:57.489598 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2bmxq" event={"ID":"cf29d3d9-d456-4db2-b187-d3a5f1fa74d8","Type":"ContainerDied","Data":"bfb90bfebcf662aa8640b0bbb46c703704bac4c196332c1a7d756ea9ebbb9cdf"} Nov 26 00:32:57 crc kubenswrapper[4875]: I1126 00:32:57.489792 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2bmxq" event={"ID":"cf29d3d9-d456-4db2-b187-d3a5f1fa74d8","Type":"ContainerStarted","Data":"1fed9c3314a2f27139e229f1740238e57b21633e26b0c838ad36a6c281ea408e"} Nov 26 00:32:58 crc kubenswrapper[4875]: I1126 00:32:58.496008 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2bmxq" event={"ID":"cf29d3d9-d456-4db2-b187-d3a5f1fa74d8","Type":"ContainerStarted","Data":"e17496c59083910e59dd6cac3dddfd03893e5e8031103ec15224389ce1235024"} Nov 26 00:32:58 crc kubenswrapper[4875]: I1126 00:32:58.501700 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-25nqc" event={"ID":"dc6d1620-c8a0-4c69-81db-8cf106f500d1","Type":"ContainerStarted","Data":"b30183f9e6336e215df32a7e85c1f82728f6c3130e365da925969d3f38ebbce0"} Nov 26 00:32:58 crc kubenswrapper[4875]: I1126 00:32:58.548269 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-25nqc" podStartSLOduration=2.121064464 podStartE2EDuration="4.548246697s" podCreationTimestamp="2025-11-26 00:32:54 +0000 UTC" firstStartedPulling="2025-11-26 00:32:55.460843503 +0000 UTC m=+1548.650819198" lastFinishedPulling="2025-11-26 00:32:57.888025736 +0000 UTC m=+1551.078001431" observedRunningTime="2025-11-26 00:32:58.545051369 +0000 UTC m=+1551.735027064" watchObservedRunningTime="2025-11-26 00:32:58.548246697 +0000 UTC m=+1551.738222392" Nov 26 00:32:58 crc kubenswrapper[4875]: I1126 00:32:58.646158 4875 kubelet.go:2421] "SyncLoop ADD" source="api" 
pods=["openshift-marketplace/community-operators-rfmwb"] Nov 26 00:32:58 crc kubenswrapper[4875]: I1126 00:32:58.647506 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rfmwb" Nov 26 00:32:58 crc kubenswrapper[4875]: I1126 00:32:58.660865 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rfmwb"] Nov 26 00:32:58 crc kubenswrapper[4875]: I1126 00:32:58.744235 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/136bbb86-4bff-47a2-947c-399c8bed11ca-catalog-content\") pod \"community-operators-rfmwb\" (UID: \"136bbb86-4bff-47a2-947c-399c8bed11ca\") " pod="openshift-marketplace/community-operators-rfmwb" Nov 26 00:32:58 crc kubenswrapper[4875]: I1126 00:32:58.744315 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-b4p8w\" (UniqueName: \"kubernetes.io/projected/136bbb86-4bff-47a2-947c-399c8bed11ca-kube-api-access-b4p8w\") pod \"community-operators-rfmwb\" (UID: \"136bbb86-4bff-47a2-947c-399c8bed11ca\") " pod="openshift-marketplace/community-operators-rfmwb" Nov 26 00:32:58 crc kubenswrapper[4875]: I1126 00:32:58.744394 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/136bbb86-4bff-47a2-947c-399c8bed11ca-utilities\") pod \"community-operators-rfmwb\" (UID: \"136bbb86-4bff-47a2-947c-399c8bed11ca\") " pod="openshift-marketplace/community-operators-rfmwb" Nov 26 00:32:58 crc kubenswrapper[4875]: I1126 00:32:58.835231 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-w76tl"] Nov 26 00:32:58 crc kubenswrapper[4875]: I1126 00:32:58.836737 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-w76tl" Nov 26 00:32:58 crc kubenswrapper[4875]: I1126 00:32:58.846259 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/136bbb86-4bff-47a2-947c-399c8bed11ca-catalog-content\") pod \"community-operators-rfmwb\" (UID: \"136bbb86-4bff-47a2-947c-399c8bed11ca\") " pod="openshift-marketplace/community-operators-rfmwb" Nov 26 00:32:58 crc kubenswrapper[4875]: I1126 00:32:58.846322 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-b4p8w\" (UniqueName: \"kubernetes.io/projected/136bbb86-4bff-47a2-947c-399c8bed11ca-kube-api-access-b4p8w\") pod \"community-operators-rfmwb\" (UID: \"136bbb86-4bff-47a2-947c-399c8bed11ca\") " pod="openshift-marketplace/community-operators-rfmwb" Nov 26 00:32:58 crc kubenswrapper[4875]: I1126 00:32:58.846384 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/136bbb86-4bff-47a2-947c-399c8bed11ca-utilities\") pod \"community-operators-rfmwb\" (UID: \"136bbb86-4bff-47a2-947c-399c8bed11ca\") " pod="openshift-marketplace/community-operators-rfmwb" Nov 26 00:32:58 crc kubenswrapper[4875]: I1126 00:32:58.846777 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/136bbb86-4bff-47a2-947c-399c8bed11ca-catalog-content\") pod \"community-operators-rfmwb\" (UID: \"136bbb86-4bff-47a2-947c-399c8bed11ca\") " pod="openshift-marketplace/community-operators-rfmwb" Nov 26 00:32:58 crc kubenswrapper[4875]: I1126 00:32:58.846937 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/136bbb86-4bff-47a2-947c-399c8bed11ca-utilities\") pod \"community-operators-rfmwb\" (UID: \"136bbb86-4bff-47a2-947c-399c8bed11ca\") " pod="openshift-marketplace/community-operators-rfmwb" Nov 26 00:32:58 crc kubenswrapper[4875]: I1126 00:32:58.848912 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-w76tl"] Nov 26 00:32:58 crc kubenswrapper[4875]: I1126 00:32:58.875369 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-b4p8w\" (UniqueName: \"kubernetes.io/projected/136bbb86-4bff-47a2-947c-399c8bed11ca-kube-api-access-b4p8w\") pod \"community-operators-rfmwb\" (UID: \"136bbb86-4bff-47a2-947c-399c8bed11ca\") " pod="openshift-marketplace/community-operators-rfmwb" Nov 26 00:32:58 crc kubenswrapper[4875]: I1126 00:32:58.948352 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-xnrqm\" (UniqueName: \"kubernetes.io/projected/3b1ea123-2e62-46c9-829a-f62e2cb207a8-kube-api-access-xnrqm\") pod \"redhat-operators-w76tl\" (UID: \"3b1ea123-2e62-46c9-829a-f62e2cb207a8\") " pod="openshift-marketplace/redhat-operators-w76tl" Nov 26 00:32:58 crc kubenswrapper[4875]: I1126 00:32:58.948695 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3b1ea123-2e62-46c9-829a-f62e2cb207a8-catalog-content\") pod \"redhat-operators-w76tl\" (UID: \"3b1ea123-2e62-46c9-829a-f62e2cb207a8\") " pod="openshift-marketplace/redhat-operators-w76tl" Nov 26 00:32:58 crc kubenswrapper[4875]: I1126 00:32:58.948802 4875 reconciler_common.go:245] 
"operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3b1ea123-2e62-46c9-829a-f62e2cb207a8-utilities\") pod \"redhat-operators-w76tl\" (UID: \"3b1ea123-2e62-46c9-829a-f62e2cb207a8\") " pod="openshift-marketplace/redhat-operators-w76tl" Nov 26 00:32:58 crc kubenswrapper[4875]: I1126 00:32:58.974265 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rfmwb" Nov 26 00:32:59 crc kubenswrapper[4875]: I1126 00:32:59.051023 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-xnrqm\" (UniqueName: \"kubernetes.io/projected/3b1ea123-2e62-46c9-829a-f62e2cb207a8-kube-api-access-xnrqm\") pod \"redhat-operators-w76tl\" (UID: \"3b1ea123-2e62-46c9-829a-f62e2cb207a8\") " pod="openshift-marketplace/redhat-operators-w76tl" Nov 26 00:32:59 crc kubenswrapper[4875]: I1126 00:32:59.051663 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3b1ea123-2e62-46c9-829a-f62e2cb207a8-catalog-content\") pod \"redhat-operators-w76tl\" (UID: \"3b1ea123-2e62-46c9-829a-f62e2cb207a8\") " pod="openshift-marketplace/redhat-operators-w76tl" Nov 26 00:32:59 crc kubenswrapper[4875]: I1126 00:32:59.051722 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3b1ea123-2e62-46c9-829a-f62e2cb207a8-utilities\") pod \"redhat-operators-w76tl\" (UID: \"3b1ea123-2e62-46c9-829a-f62e2cb207a8\") " pod="openshift-marketplace/redhat-operators-w76tl" Nov 26 00:32:59 crc kubenswrapper[4875]: I1126 00:32:59.052951 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3b1ea123-2e62-46c9-829a-f62e2cb207a8-utilities\") pod \"redhat-operators-w76tl\" (UID: \"3b1ea123-2e62-46c9-829a-f62e2cb207a8\") " pod="openshift-marketplace/redhat-operators-w76tl" Nov 26 00:32:59 crc kubenswrapper[4875]: I1126 00:32:59.053947 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3b1ea123-2e62-46c9-829a-f62e2cb207a8-catalog-content\") pod \"redhat-operators-w76tl\" (UID: \"3b1ea123-2e62-46c9-829a-f62e2cb207a8\") " pod="openshift-marketplace/redhat-operators-w76tl" Nov 26 00:32:59 crc kubenswrapper[4875]: I1126 00:32:59.094132 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-xnrqm\" (UniqueName: \"kubernetes.io/projected/3b1ea123-2e62-46c9-829a-f62e2cb207a8-kube-api-access-xnrqm\") pod \"redhat-operators-w76tl\" (UID: \"3b1ea123-2e62-46c9-829a-f62e2cb207a8\") " pod="openshift-marketplace/redhat-operators-w76tl" Nov 26 00:32:59 crc kubenswrapper[4875]: I1126 00:32:59.153893 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-w76tl" Nov 26 00:32:59 crc kubenswrapper[4875]: I1126 00:32:59.434194 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-rfmwb"] Nov 26 00:32:59 crc kubenswrapper[4875]: I1126 00:32:59.500195 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-w76tl"] Nov 26 00:32:59 crc kubenswrapper[4875]: I1126 00:32:59.515843 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rfmwb" event={"ID":"136bbb86-4bff-47a2-947c-399c8bed11ca","Type":"ContainerStarted","Data":"bf1a14709f804059e7e97eb5711e0f5c57d33d703b87b678a0f53a77917c5f62"} Nov 26 00:32:59 crc kubenswrapper[4875]: I1126 00:32:59.519562 4875 generic.go:334] "Generic (PLEG): container finished" podID="cf29d3d9-d456-4db2-b187-d3a5f1fa74d8" containerID="e17496c59083910e59dd6cac3dddfd03893e5e8031103ec15224389ce1235024" exitCode=0 Nov 26 00:32:59 crc kubenswrapper[4875]: I1126 00:32:59.520078 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2bmxq" event={"ID":"cf29d3d9-d456-4db2-b187-d3a5f1fa74d8","Type":"ContainerDied","Data":"e17496c59083910e59dd6cac3dddfd03893e5e8031103ec15224389ce1235024"} Nov 26 00:33:00 crc kubenswrapper[4875]: I1126 00:33:00.527935 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w76tl" event={"ID":"3b1ea123-2e62-46c9-829a-f62e2cb207a8","Type":"ContainerDied","Data":"05a18c7faeb164544afccfa6226c0f65cedcdc005df6e33eedb2b44e7a5e14ab"} Nov 26 00:33:00 crc kubenswrapper[4875]: I1126 00:33:00.527933 4875 generic.go:334] "Generic (PLEG): container finished" podID="3b1ea123-2e62-46c9-829a-f62e2cb207a8" containerID="05a18c7faeb164544afccfa6226c0f65cedcdc005df6e33eedb2b44e7a5e14ab" exitCode=0 Nov 26 00:33:00 crc kubenswrapper[4875]: I1126 00:33:00.529021 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w76tl" event={"ID":"3b1ea123-2e62-46c9-829a-f62e2cb207a8","Type":"ContainerStarted","Data":"5c65844e98c0f05694d13644138d39b85b6c6814bb66396ad4411005d0a42e9e"} Nov 26 00:33:00 crc kubenswrapper[4875]: I1126 00:33:00.530838 4875 generic.go:334] "Generic (PLEG): container finished" podID="136bbb86-4bff-47a2-947c-399c8bed11ca" containerID="1adfc7761690b44a2d4351a63e7bc2fa5a0cfd9c2df23f6deaee279c0052f15c" exitCode=0 Nov 26 00:33:00 crc kubenswrapper[4875]: I1126 00:33:00.530943 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rfmwb" event={"ID":"136bbb86-4bff-47a2-947c-399c8bed11ca","Type":"ContainerDied","Data":"1adfc7761690b44a2d4351a63e7bc2fa5a0cfd9c2df23f6deaee279c0052f15c"} Nov 26 00:33:00 crc kubenswrapper[4875]: I1126 00:33:00.535066 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2bmxq" event={"ID":"cf29d3d9-d456-4db2-b187-d3a5f1fa74d8","Type":"ContainerStarted","Data":"5ec7afa4984661cc5b627c156f103b5086e7f2e5f3198384e9eaf0e9b2761730"} Nov 26 00:33:00 crc kubenswrapper[4875]: I1126 00:33:00.585222 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-2bmxq" podStartSLOduration=2.150590175 podStartE2EDuration="4.58520132s" podCreationTimestamp="2025-11-26 00:32:56 +0000 UTC" firstStartedPulling="2025-11-26 00:32:57.491518385 +0000 UTC m=+1550.681494080" lastFinishedPulling="2025-11-26 00:32:59.92612953 +0000 
UTC m=+1553.116105225" observedRunningTime="2025-11-26 00:33:00.578164708 +0000 UTC m=+1553.768140393" watchObservedRunningTime="2025-11-26 00:33:00.58520132 +0000 UTC m=+1553.775177015" Nov 26 00:33:01 crc kubenswrapper[4875]: I1126 00:33:01.040438 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/community-operators-tt82f"] Nov 26 00:33:01 crc kubenswrapper[4875]: I1126 00:33:01.047220 4875 util.go:30] "No sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tt82f" Nov 26 00:33:01 crc kubenswrapper[4875]: I1126 00:33:01.057760 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tt82f"] Nov 26 00:33:01 crc kubenswrapper[4875]: I1126 00:33:01.186235 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bef9a3b8-a74c-4e4a-8b50-d216af962a40-utilities\") pod \"community-operators-tt82f\" (UID: \"bef9a3b8-a74c-4e4a-8b50-d216af962a40\") " pod="openshift-marketplace/community-operators-tt82f" Nov 26 00:33:01 crc kubenswrapper[4875]: I1126 00:33:01.186867 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-vj2ps\" (UniqueName: \"kubernetes.io/projected/bef9a3b8-a74c-4e4a-8b50-d216af962a40-kube-api-access-vj2ps\") pod \"community-operators-tt82f\" (UID: \"bef9a3b8-a74c-4e4a-8b50-d216af962a40\") " pod="openshift-marketplace/community-operators-tt82f" Nov 26 00:33:01 crc kubenswrapper[4875]: I1126 00:33:01.187058 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bef9a3b8-a74c-4e4a-8b50-d216af962a40-catalog-content\") pod \"community-operators-tt82f\" (UID: \"bef9a3b8-a74c-4e4a-8b50-d216af962a40\") " pod="openshift-marketplace/community-operators-tt82f" Nov 26 00:33:01 crc kubenswrapper[4875]: I1126 00:33:01.237821 4875 kubelet.go:2421] "SyncLoop ADD" source="api" pods=["openshift-marketplace/redhat-operators-qhkn2"] Nov 26 00:33:01 crc kubenswrapper[4875]: I1126 00:33:01.239458 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-qhkn2" Nov 26 00:33:01 crc kubenswrapper[4875]: I1126 00:33:01.262364 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-qhkn2"] Nov 26 00:33:01 crc kubenswrapper[4875]: I1126 00:33:01.288076 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bef9a3b8-a74c-4e4a-8b50-d216af962a40-utilities\") pod \"community-operators-tt82f\" (UID: \"bef9a3b8-a74c-4e4a-8b50-d216af962a40\") " pod="openshift-marketplace/community-operators-tt82f" Nov 26 00:33:01 crc kubenswrapper[4875]: I1126 00:33:01.288153 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-vj2ps\" (UniqueName: \"kubernetes.io/projected/bef9a3b8-a74c-4e4a-8b50-d216af962a40-kube-api-access-vj2ps\") pod \"community-operators-tt82f\" (UID: \"bef9a3b8-a74c-4e4a-8b50-d216af962a40\") " pod="openshift-marketplace/community-operators-tt82f" Nov 26 00:33:01 crc kubenswrapper[4875]: I1126 00:33:01.288220 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bef9a3b8-a74c-4e4a-8b50-d216af962a40-catalog-content\") pod \"community-operators-tt82f\" (UID: \"bef9a3b8-a74c-4e4a-8b50-d216af962a40\") " pod="openshift-marketplace/community-operators-tt82f" Nov 26 00:33:01 crc kubenswrapper[4875]: I1126 00:33:01.288755 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bef9a3b8-a74c-4e4a-8b50-d216af962a40-catalog-content\") pod \"community-operators-tt82f\" (UID: \"bef9a3b8-a74c-4e4a-8b50-d216af962a40\") " pod="openshift-marketplace/community-operators-tt82f" Nov 26 00:33:01 crc kubenswrapper[4875]: I1126 00:33:01.289211 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bef9a3b8-a74c-4e4a-8b50-d216af962a40-utilities\") pod \"community-operators-tt82f\" (UID: \"bef9a3b8-a74c-4e4a-8b50-d216af962a40\") " pod="openshift-marketplace/community-operators-tt82f" Nov 26 00:33:01 crc kubenswrapper[4875]: I1126 00:33:01.319524 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-vj2ps\" (UniqueName: \"kubernetes.io/projected/bef9a3b8-a74c-4e4a-8b50-d216af962a40-kube-api-access-vj2ps\") pod \"community-operators-tt82f\" (UID: \"bef9a3b8-a74c-4e4a-8b50-d216af962a40\") " pod="openshift-marketplace/community-operators-tt82f" Nov 26 00:33:01 crc kubenswrapper[4875]: I1126 00:33:01.366672 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-tt82f" Nov 26 00:33:01 crc kubenswrapper[4875]: I1126 00:33:01.389648 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07908ebc-ce0d-47c9-83c5-b459c3c23620-catalog-content\") pod \"redhat-operators-qhkn2\" (UID: \"07908ebc-ce0d-47c9-83c5-b459c3c23620\") " pod="openshift-marketplace/redhat-operators-qhkn2" Nov 26 00:33:01 crc kubenswrapper[4875]: I1126 00:33:01.389742 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"kube-api-access-bnt62\" (UniqueName: \"kubernetes.io/projected/07908ebc-ce0d-47c9-83c5-b459c3c23620-kube-api-access-bnt62\") pod \"redhat-operators-qhkn2\" (UID: \"07908ebc-ce0d-47c9-83c5-b459c3c23620\") " pod="openshift-marketplace/redhat-operators-qhkn2" Nov 26 00:33:01 crc kubenswrapper[4875]: I1126 00:33:01.389804 4875 reconciler_common.go:245] "operationExecutor.VerifyControllerAttachedVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07908ebc-ce0d-47c9-83c5-b459c3c23620-utilities\") pod \"redhat-operators-qhkn2\" (UID: \"07908ebc-ce0d-47c9-83c5-b459c3c23620\") " pod="openshift-marketplace/redhat-operators-qhkn2" Nov 26 00:33:01 crc kubenswrapper[4875]: I1126 00:33:01.391088 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-ntrhj" Nov 26 00:33:01 crc kubenswrapper[4875]: I1126 00:33:01.391156 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-ntrhj" Nov 26 00:33:01 crc kubenswrapper[4875]: I1126 00:33:01.474712 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-ntrhj" Nov 26 00:33:01 crc kubenswrapper[4875]: I1126 00:33:01.492171 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"kube-api-access-bnt62\" (UniqueName: \"kubernetes.io/projected/07908ebc-ce0d-47c9-83c5-b459c3c23620-kube-api-access-bnt62\") pod \"redhat-operators-qhkn2\" (UID: \"07908ebc-ce0d-47c9-83c5-b459c3c23620\") " pod="openshift-marketplace/redhat-operators-qhkn2" Nov 26 00:33:01 crc kubenswrapper[4875]: I1126 00:33:01.492273 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07908ebc-ce0d-47c9-83c5-b459c3c23620-utilities\") pod \"redhat-operators-qhkn2\" (UID: \"07908ebc-ce0d-47c9-83c5-b459c3c23620\") " pod="openshift-marketplace/redhat-operators-qhkn2" Nov 26 00:33:01 crc kubenswrapper[4875]: I1126 00:33:01.492348 4875 reconciler_common.go:218] "operationExecutor.MountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07908ebc-ce0d-47c9-83c5-b459c3c23620-catalog-content\") pod \"redhat-operators-qhkn2\" (UID: \"07908ebc-ce0d-47c9-83c5-b459c3c23620\") " pod="openshift-marketplace/redhat-operators-qhkn2" Nov 26 00:33:01 crc kubenswrapper[4875]: I1126 00:33:01.492989 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07908ebc-ce0d-47c9-83c5-b459c3c23620-catalog-content\") pod \"redhat-operators-qhkn2\" (UID: \"07908ebc-ce0d-47c9-83c5-b459c3c23620\") " pod="openshift-marketplace/redhat-operators-qhkn2" Nov 26 00:33:01 crc kubenswrapper[4875]: I1126 00:33:01.493266 4875 operation_generator.go:637] "MountVolume.SetUp 
succeeded for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07908ebc-ce0d-47c9-83c5-b459c3c23620-utilities\") pod \"redhat-operators-qhkn2\" (UID: \"07908ebc-ce0d-47c9-83c5-b459c3c23620\") " pod="openshift-marketplace/redhat-operators-qhkn2" Nov 26 00:33:01 crc kubenswrapper[4875]: I1126 00:33:01.580882 4875 operation_generator.go:637] "MountVolume.SetUp succeeded for volume \"kube-api-access-bnt62\" (UniqueName: \"kubernetes.io/projected/07908ebc-ce0d-47c9-83c5-b459c3c23620-kube-api-access-bnt62\") pod \"redhat-operators-qhkn2\" (UID: \"07908ebc-ce0d-47c9-83c5-b459c3c23620\") " pod="openshift-marketplace/redhat-operators-qhkn2" Nov 26 00:33:01 crc kubenswrapper[4875]: I1126 00:33:01.582457 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w76tl" event={"ID":"3b1ea123-2e62-46c9-829a-f62e2cb207a8","Type":"ContainerStarted","Data":"df7279783322857708b95637067bed50e687ff2f590b470881ac23eb27a0c37a"} Nov 26 00:33:01 crc kubenswrapper[4875]: I1126 00:33:01.676622 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-ntrhj" Nov 26 00:33:01 crc kubenswrapper[4875]: I1126 00:33:01.747783 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/community-operators-tt82f"] Nov 26 00:33:01 crc kubenswrapper[4875]: I1126 00:33:01.766151 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-bc62x" Nov 26 00:33:01 crc kubenswrapper[4875]: I1126 00:33:01.767284 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-bc62x" Nov 26 00:33:01 crc kubenswrapper[4875]: I1126 00:33:01.816081 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-bc62x" Nov 26 00:33:01 crc kubenswrapper[4875]: I1126 00:33:01.851569 4875 util.go:30] "No sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-qhkn2" Nov 26 00:33:02 crc kubenswrapper[4875]: I1126 00:33:02.151101 4875 kubelet.go:2428] "SyncLoop UPDATE" source="api" pods=["openshift-marketplace/redhat-operators-qhkn2"] Nov 26 00:33:02 crc kubenswrapper[4875]: W1126 00:33:02.168215 4875 manager.go:1169] Failed to process watch event {EventType:0 Name:/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod07908ebc_ce0d_47c9_83c5_b459c3c23620.slice/crio-450a11dda5cd27e3e94082e839ef3c71e5d7e0e482761f2a75c4a06d201c5c3f WatchSource:0}: Error finding container 450a11dda5cd27e3e94082e839ef3c71e5d7e0e482761f2a75c4a06d201c5c3f: Status 404 returned error can't find the container with id 450a11dda5cd27e3e94082e839ef3c71e5d7e0e482761f2a75c4a06d201c5c3f Nov 26 00:33:02 crc kubenswrapper[4875]: I1126 00:33:02.592873 4875 generic.go:334] "Generic (PLEG): container finished" podID="3b1ea123-2e62-46c9-829a-f62e2cb207a8" containerID="df7279783322857708b95637067bed50e687ff2f590b470881ac23eb27a0c37a" exitCode=0 Nov 26 00:33:02 crc kubenswrapper[4875]: I1126 00:33:02.592951 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w76tl" event={"ID":"3b1ea123-2e62-46c9-829a-f62e2cb207a8","Type":"ContainerDied","Data":"df7279783322857708b95637067bed50e687ff2f590b470881ac23eb27a0c37a"} Nov 26 00:33:02 crc kubenswrapper[4875]: I1126 00:33:02.596787 4875 generic.go:334] "Generic (PLEG): container finished" podID="bef9a3b8-a74c-4e4a-8b50-d216af962a40" containerID="3b3b31d4edfb5d89bb423019bb5d7580341766e544ad51c3551fd775b486b7b0" exitCode=0 Nov 26 00:33:02 crc kubenswrapper[4875]: I1126 00:33:02.596900 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tt82f" event={"ID":"bef9a3b8-a74c-4e4a-8b50-d216af962a40","Type":"ContainerDied","Data":"3b3b31d4edfb5d89bb423019bb5d7580341766e544ad51c3551fd775b486b7b0"} Nov 26 00:33:02 crc kubenswrapper[4875]: I1126 00:33:02.596946 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tt82f" event={"ID":"bef9a3b8-a74c-4e4a-8b50-d216af962a40","Type":"ContainerStarted","Data":"ebc15e7bef24745d57ddcc3a975f2da0b2496820be005f167dc9d92cbbac1ed9"} Nov 26 00:33:02 crc kubenswrapper[4875]: I1126 00:33:02.599783 4875 generic.go:334] "Generic (PLEG): container finished" podID="07908ebc-ce0d-47c9-83c5-b459c3c23620" containerID="45d36ebfb57d4be58053da89fdfdf398d7dd13813f9ef80a73d0dcede673b5f5" exitCode=0 Nov 26 00:33:02 crc kubenswrapper[4875]: I1126 00:33:02.600290 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qhkn2" event={"ID":"07908ebc-ce0d-47c9-83c5-b459c3c23620","Type":"ContainerDied","Data":"45d36ebfb57d4be58053da89fdfdf398d7dd13813f9ef80a73d0dcede673b5f5"} Nov 26 00:33:02 crc kubenswrapper[4875]: I1126 00:33:02.600354 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qhkn2" event={"ID":"07908ebc-ce0d-47c9-83c5-b459c3c23620","Type":"ContainerStarted","Data":"450a11dda5cd27e3e94082e839ef3c71e5d7e0e482761f2a75c4a06d201c5c3f"} Nov 26 00:33:02 crc kubenswrapper[4875]: I1126 00:33:02.603746 4875 generic.go:334] "Generic (PLEG): container finished" podID="136bbb86-4bff-47a2-947c-399c8bed11ca" containerID="47b5544b34aeabb49ab968d047a4557bd551a4e8267d5fc24cbcef8c1a686b98" exitCode=0 Nov 26 00:33:02 crc kubenswrapper[4875]: I1126 00:33:02.604588 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" 
pod="openshift-marketplace/community-operators-rfmwb" event={"ID":"136bbb86-4bff-47a2-947c-399c8bed11ca","Type":"ContainerDied","Data":"47b5544b34aeabb49ab968d047a4557bd551a4e8267d5fc24cbcef8c1a686b98"} Nov 26 00:33:02 crc kubenswrapper[4875]: I1126 00:33:02.666401 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-bc62x" Nov 26 00:33:03 crc kubenswrapper[4875]: I1126 00:33:03.022974 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-249dr" Nov 26 00:33:03 crc kubenswrapper[4875]: I1126 00:33:03.023058 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-249dr" Nov 26 00:33:03 crc kubenswrapper[4875]: I1126 00:33:03.096367 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-249dr" Nov 26 00:33:03 crc kubenswrapper[4875]: I1126 00:33:03.615753 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w76tl" event={"ID":"3b1ea123-2e62-46c9-829a-f62e2cb207a8","Type":"ContainerStarted","Data":"be20818a6aef76f9b56c2c35a877e405e9fb80763ea77098732039955bcdec1a"} Nov 26 00:33:03 crc kubenswrapper[4875]: I1126 00:33:03.635610 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tt82f" event={"ID":"bef9a3b8-a74c-4e4a-8b50-d216af962a40","Type":"ContainerStarted","Data":"bcbb01155d22740eb6f652b6ef3c1091dc174c1189ffc71052b4690fb36c0333"} Nov 26 00:33:03 crc kubenswrapper[4875]: I1126 00:33:03.648636 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-w76tl" podStartSLOduration=3.190333012 podStartE2EDuration="5.648619051s" podCreationTimestamp="2025-11-26 00:32:58 +0000 UTC" firstStartedPulling="2025-11-26 00:33:00.530177053 +0000 UTC m=+1553.720152748" lastFinishedPulling="2025-11-26 00:33:02.988463092 +0000 UTC m=+1556.178438787" observedRunningTime="2025-11-26 00:33:03.644976242 +0000 UTC m=+1556.834951937" watchObservedRunningTime="2025-11-26 00:33:03.648619051 +0000 UTC m=+1556.838594746" Nov 26 00:33:03 crc kubenswrapper[4875]: I1126 00:33:03.661651 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qhkn2" event={"ID":"07908ebc-ce0d-47c9-83c5-b459c3c23620","Type":"ContainerStarted","Data":"b7839aed0eaaf6af87afd5946bb010bdc65a1e2ea8a50076830c7935e6543187"} Nov 26 00:33:03 crc kubenswrapper[4875]: I1126 00:33:03.684807 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rfmwb" event={"ID":"136bbb86-4bff-47a2-947c-399c8bed11ca","Type":"ContainerStarted","Data":"ba1e704ab0e4a6e144cbe56b578a289204af686036e4286a83164051872453b2"} Nov 26 00:33:03 crc kubenswrapper[4875]: I1126 00:33:03.738318 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-249dr" Nov 26 00:33:03 crc kubenswrapper[4875]: I1126 00:33:03.758199 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-rfmwb" podStartSLOduration=3.276136864 podStartE2EDuration="5.758181029s" podCreationTimestamp="2025-11-26 00:32:58 +0000 UTC" firstStartedPulling="2025-11-26 00:33:00.53226371 +0000 UTC m=+1553.722239405" lastFinishedPulling="2025-11-26 00:33:03.014307875 +0000 UTC m=+1556.204283570" 
observedRunningTime="2025-11-26 00:33:03.755392683 +0000 UTC m=+1556.945368378" watchObservedRunningTime="2025-11-26 00:33:03.758181029 +0000 UTC m=+1556.948156714" Nov 26 00:33:04 crc kubenswrapper[4875]: I1126 00:33:04.753284 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-25nqc" Nov 26 00:33:04 crc kubenswrapper[4875]: I1126 00:33:04.753680 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-25nqc" Nov 26 00:33:04 crc kubenswrapper[4875]: I1126 00:33:04.796039 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-25nqc" Nov 26 00:33:05 crc kubenswrapper[4875]: I1126 00:33:05.751475 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-25nqc" Nov 26 00:33:06 crc kubenswrapper[4875]: I1126 00:33:06.560596 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-2bmxq" Nov 26 00:33:06 crc kubenswrapper[4875]: I1126 00:33:06.560956 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-2bmxq" Nov 26 00:33:06 crc kubenswrapper[4875]: I1126 00:33:06.605795 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-2bmxq" Nov 26 00:33:06 crc kubenswrapper[4875]: I1126 00:33:06.713942 4875 generic.go:334] "Generic (PLEG): container finished" podID="07908ebc-ce0d-47c9-83c5-b459c3c23620" containerID="b7839aed0eaaf6af87afd5946bb010bdc65a1e2ea8a50076830c7935e6543187" exitCode=0 Nov 26 00:33:06 crc kubenswrapper[4875]: I1126 00:33:06.714024 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qhkn2" event={"ID":"07908ebc-ce0d-47c9-83c5-b459c3c23620","Type":"ContainerDied","Data":"b7839aed0eaaf6af87afd5946bb010bdc65a1e2ea8a50076830c7935e6543187"} Nov 26 00:33:06 crc kubenswrapper[4875]: I1126 00:33:06.717022 4875 generic.go:334] "Generic (PLEG): container finished" podID="bef9a3b8-a74c-4e4a-8b50-d216af962a40" containerID="bcbb01155d22740eb6f652b6ef3c1091dc174c1189ffc71052b4690fb36c0333" exitCode=0 Nov 26 00:33:06 crc kubenswrapper[4875]: I1126 00:33:06.717100 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tt82f" event={"ID":"bef9a3b8-a74c-4e4a-8b50-d216af962a40","Type":"ContainerDied","Data":"bcbb01155d22740eb6f652b6ef3c1091dc174c1189ffc71052b4690fb36c0333"} Nov 26 00:33:06 crc kubenswrapper[4875]: I1126 00:33:06.769654 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-2bmxq" Nov 26 00:33:07 crc kubenswrapper[4875]: E1126 00:33:07.091823 4875 cadvisor_stats_provider.go:516] "Partial failure issuing cadvisor.ContainerInfoV2" err="partial failures: [\"/kubepods.slice/kubepods-burstable.slice/kubepods-burstable-pod0d4fb5a7_3ade_467a_9640_d744b1ef0c8e.slice/crio-conmon-5d035f080764919b1a1cc8b64c0f43d53b6b36ff26c916f5425dc21761337921.scope\": RecentStats: unable to find data in memory cache]" Nov 26 00:33:07 crc kubenswrapper[4875]: I1126 00:33:07.533526 4875 scope.go:117] "RemoveContainer" containerID="5d035f080764919b1a1cc8b64c0f43d53b6b36ff26c916f5425dc21761337921" Nov 26 00:33:07 crc kubenswrapper[4875]: E1126 00:33:07.534046 4875 pod_workers.go:1301] "Error syncing pod, skipping" 
err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9mztb_openshift-machine-config-operator(0d4fb5a7-3ade-467a-9640-d744b1ef0c8e)\"" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" Nov 26 00:33:08 crc kubenswrapper[4875]: I1126 00:33:08.629717 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-546g6"] Nov 26 00:33:08 crc kubenswrapper[4875]: I1126 00:33:08.630765 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/certified-operators-546g6" podUID="8703e7d9-1478-4664-90fa-0980c97ccbec" containerName="registry-server" containerID="cri-o://deea4738b64295297a7a551858bcde358d05536744d187a91eae429329c1f11a" gracePeriod=2 Nov 26 00:33:08 crc kubenswrapper[4875]: I1126 00:33:08.730970 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tt82f" event={"ID":"bef9a3b8-a74c-4e4a-8b50-d216af962a40","Type":"ContainerStarted","Data":"c6b10e137db06cbc7a47b1a5c21b48353c6fcd14de7690aa8797a516c600e3cf"} Nov 26 00:33:08 crc kubenswrapper[4875]: I1126 00:33:08.733390 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qhkn2" event={"ID":"07908ebc-ce0d-47c9-83c5-b459c3c23620","Type":"ContainerStarted","Data":"d2d7c5892f33809c35ef125c9d3ba109a0576cf64935ce70b983abc84cae7b7a"} Nov 26 00:33:08 crc kubenswrapper[4875]: I1126 00:33:08.755315 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/community-operators-tt82f" podStartSLOduration=1.786243383 podStartE2EDuration="7.755297616s" podCreationTimestamp="2025-11-26 00:33:01 +0000 UTC" firstStartedPulling="2025-11-26 00:33:02.599882377 +0000 UTC m=+1555.789858072" lastFinishedPulling="2025-11-26 00:33:08.56893661 +0000 UTC m=+1561.758912305" observedRunningTime="2025-11-26 00:33:08.751506404 +0000 UTC m=+1561.941482099" watchObservedRunningTime="2025-11-26 00:33:08.755297616 +0000 UTC m=+1561.945273311" Nov 26 00:33:08 crc kubenswrapper[4875]: I1126 00:33:08.781616 4875 pod_startup_latency_tracker.go:104] "Observed pod startup duration" pod="openshift-marketplace/redhat-operators-qhkn2" podStartSLOduration=1.8860045460000001 podStartE2EDuration="7.781591601s" podCreationTimestamp="2025-11-26 00:33:01 +0000 UTC" firstStartedPulling="2025-11-26 00:33:02.601216543 +0000 UTC m=+1555.791192238" lastFinishedPulling="2025-11-26 00:33:08.496803598 +0000 UTC m=+1561.686779293" observedRunningTime="2025-11-26 00:33:08.776998917 +0000 UTC m=+1561.966974612" watchObservedRunningTime="2025-11-26 00:33:08.781591601 +0000 UTC m=+1561.971567296" Nov 26 00:33:08 crc kubenswrapper[4875]: I1126 00:33:08.975364 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-rfmwb" Nov 26 00:33:08 crc kubenswrapper[4875]: I1126 00:33:08.975430 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-rfmwb" Nov 26 00:33:09 crc kubenswrapper[4875]: I1126 00:33:09.016764 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-rfmwb" Nov 26 00:33:09 crc kubenswrapper[4875]: I1126 00:33:09.154504 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" 
pod="openshift-marketplace/redhat-operators-w76tl" Nov 26 00:33:09 crc kubenswrapper[4875]: I1126 00:33:09.154561 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-w76tl" Nov 26 00:33:09 crc kubenswrapper[4875]: I1126 00:33:09.206441 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/redhat-operators-w76tl" Nov 26 00:33:09 crc kubenswrapper[4875]: I1126 00:33:09.785079 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-rfmwb" Nov 26 00:33:09 crc kubenswrapper[4875]: I1126 00:33:09.785845 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-w76tl" Nov 26 00:33:11 crc kubenswrapper[4875]: I1126 00:33:11.367853 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/community-operators-tt82f" Nov 26 00:33:11 crc kubenswrapper[4875]: I1126 00:33:11.367906 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/community-operators-tt82f" Nov 26 00:33:11 crc kubenswrapper[4875]: I1126 00:33:11.414858 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" pod="openshift-marketplace/community-operators-tt82f" Nov 26 00:33:11 crc kubenswrapper[4875]: I1126 00:33:11.509263 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-86cb77c54b-6mr68_20d9eaa7-e48a-4f9e-aa76-f7fe8494e270/cert-manager-controller/1.log" Nov 26 00:33:11 crc kubenswrapper[4875]: I1126 00:33:11.619691 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-86cb77c54b-6mr68_20d9eaa7-e48a-4f9e-aa76-f7fe8494e270/cert-manager-controller/0.log" Nov 26 00:33:11 crc kubenswrapper[4875]: I1126 00:33:11.816037 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-855d9ccff4-tcxqx_2bf438ed-b485-429e-ab20-c5f354a3ab25/cert-manager-cainjector/1.log" Nov 26 00:33:11 crc kubenswrapper[4875]: I1126 00:33:11.838998 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-cainjector-855d9ccff4-tcxqx_2bf438ed-b485-429e-ab20-c5f354a3ab25/cert-manager-cainjector/2.log" Nov 26 00:33:11 crc kubenswrapper[4875]: I1126 00:33:11.852214 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="" pod="openshift-marketplace/redhat-operators-qhkn2" Nov 26 00:33:11 crc kubenswrapper[4875]: I1126 00:33:11.852257 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="unhealthy" pod="openshift-marketplace/redhat-operators-qhkn2" Nov 26 00:33:11 crc kubenswrapper[4875]: I1126 00:33:11.955867 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/cert-manager_cert-manager-webhook-f4fb5df64-2bx8m_7b94883c-6fc2-4f66-a969-121d26394910/cert-manager-webhook/0.log" Nov 26 00:33:12 crc kubenswrapper[4875]: I1126 00:33:12.892173 4875 prober.go:107] "Probe failed" probeType="Startup" pod="openshift-marketplace/redhat-operators-qhkn2" podUID="07908ebc-ce0d-47c9-83c5-b459c3c23620" containerName="registry-server" probeResult="failure" output=< Nov 26 00:33:12 crc kubenswrapper[4875]: timeout: failed to connect service ":50051" within 1s Nov 26 00:33:12 crc kubenswrapper[4875]: > Nov 26 00:33:14 crc kubenswrapper[4875]: I1126 00:33:14.595223 4875 generic.go:334] "Generic (PLEG): container finished" podID="8703e7d9-1478-4664-90fa-0980c97ccbec" 
containerID="deea4738b64295297a7a551858bcde358d05536744d187a91eae429329c1f11a" exitCode=0 Nov 26 00:33:14 crc kubenswrapper[4875]: I1126 00:33:14.595311 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-546g6" event={"ID":"8703e7d9-1478-4664-90fa-0980c97ccbec","Type":"ContainerDied","Data":"deea4738b64295297a7a551858bcde358d05536744d187a91eae429329c1f11a"} Nov 26 00:33:14 crc kubenswrapper[4875]: I1126 00:33:14.635160 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-546g6" Nov 26 00:33:14 crc kubenswrapper[4875]: I1126 00:33:14.700721 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8703e7d9-1478-4664-90fa-0980c97ccbec-utilities\") pod \"8703e7d9-1478-4664-90fa-0980c97ccbec\" (UID: \"8703e7d9-1478-4664-90fa-0980c97ccbec\") " Nov 26 00:33:14 crc kubenswrapper[4875]: I1126 00:33:14.700812 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8703e7d9-1478-4664-90fa-0980c97ccbec-catalog-content\") pod \"8703e7d9-1478-4664-90fa-0980c97ccbec\" (UID: \"8703e7d9-1478-4664-90fa-0980c97ccbec\") " Nov 26 00:33:14 crc kubenswrapper[4875]: I1126 00:33:14.700839 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-tct6s\" (UniqueName: \"kubernetes.io/projected/8703e7d9-1478-4664-90fa-0980c97ccbec-kube-api-access-tct6s\") pod \"8703e7d9-1478-4664-90fa-0980c97ccbec\" (UID: \"8703e7d9-1478-4664-90fa-0980c97ccbec\") " Nov 26 00:33:14 crc kubenswrapper[4875]: I1126 00:33:14.701965 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8703e7d9-1478-4664-90fa-0980c97ccbec-utilities" (OuterVolumeSpecName: "utilities") pod "8703e7d9-1478-4664-90fa-0980c97ccbec" (UID: "8703e7d9-1478-4664-90fa-0980c97ccbec"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:33:14 crc kubenswrapper[4875]: I1126 00:33:14.721518 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/8703e7d9-1478-4664-90fa-0980c97ccbec-kube-api-access-tct6s" (OuterVolumeSpecName: "kube-api-access-tct6s") pod "8703e7d9-1478-4664-90fa-0980c97ccbec" (UID: "8703e7d9-1478-4664-90fa-0980c97ccbec"). InnerVolumeSpecName "kube-api-access-tct6s". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:33:14 crc kubenswrapper[4875]: I1126 00:33:14.751919 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/8703e7d9-1478-4664-90fa-0980c97ccbec-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "8703e7d9-1478-4664-90fa-0980c97ccbec" (UID: "8703e7d9-1478-4664-90fa-0980c97ccbec"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:33:14 crc kubenswrapper[4875]: I1126 00:33:14.803033 4875 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/8703e7d9-1478-4664-90fa-0980c97ccbec-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 00:33:14 crc kubenswrapper[4875]: I1126 00:33:14.803097 4875 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/8703e7d9-1478-4664-90fa-0980c97ccbec-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 00:33:14 crc kubenswrapper[4875]: I1126 00:33:14.803111 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-tct6s\" (UniqueName: \"kubernetes.io/projected/8703e7d9-1478-4664-90fa-0980c97ccbec-kube-api-access-tct6s\") on node \"crc\" DevicePath \"\"" Nov 26 00:33:15 crc kubenswrapper[4875]: I1126 00:33:15.604183 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/certified-operators-546g6" event={"ID":"8703e7d9-1478-4664-90fa-0980c97ccbec","Type":"ContainerDied","Data":"dc2e93f4e09909cc61bf30a8d2380a0b455450fa6033985d6d4803d1ccd9c7c4"} Nov 26 00:33:15 crc kubenswrapper[4875]: I1126 00:33:15.604240 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/certified-operators-546g6" Nov 26 00:33:15 crc kubenswrapper[4875]: I1126 00:33:15.604535 4875 scope.go:117] "RemoveContainer" containerID="deea4738b64295297a7a551858bcde358d05536744d187a91eae429329c1f11a" Nov 26 00:33:15 crc kubenswrapper[4875]: I1126 00:33:15.624063 4875 scope.go:117] "RemoveContainer" containerID="bcb8f2fbb307e255f4b27231a137a72597ec171534574c16d7f6d50c71ce7f73" Nov 26 00:33:15 crc kubenswrapper[4875]: I1126 00:33:15.641518 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/certified-operators-546g6"] Nov 26 00:33:15 crc kubenswrapper[4875]: I1126 00:33:15.647619 4875 scope.go:117] "RemoveContainer" containerID="3b8bbb67d3dfdba8e80fa8e3070c24fb5f146242f37421052622b0dbaa1edabc" Nov 26 00:33:15 crc kubenswrapper[4875]: I1126 00:33:15.655340 4875 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/certified-operators-546g6"] Nov 26 00:33:17 crc kubenswrapper[4875]: I1126 00:33:17.536473 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="8703e7d9-1478-4664-90fa-0980c97ccbec" path="/var/lib/kubelet/pods/8703e7d9-1478-4664-90fa-0980c97ccbec/volumes" Nov 26 00:33:21 crc kubenswrapper[4875]: I1126 00:33:21.421914 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/community-operators-tt82f" Nov 26 00:33:21 crc kubenswrapper[4875]: I1126 00:33:21.529494 4875 scope.go:117] "RemoveContainer" containerID="5d035f080764919b1a1cc8b64c0f43d53b6b36ff26c916f5425dc21761337921" Nov 26 00:33:21 crc kubenswrapper[4875]: E1126 00:33:21.529706 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9mztb_openshift-machine-config-operator(0d4fb5a7-3ade-467a-9640-d744b1ef0c8e)\"" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" Nov 26 00:33:21 crc kubenswrapper[4875]: I1126 00:33:21.893310 4875 kubelet.go:2542] "SyncLoop (probe)" probe="startup" status="started" 
pod="openshift-marketplace/redhat-operators-qhkn2" Nov 26 00:33:21 crc kubenswrapper[4875]: I1126 00:33:21.940022 4875 kubelet.go:2542] "SyncLoop (probe)" probe="readiness" status="ready" pod="openshift-marketplace/redhat-operators-qhkn2" Nov 26 00:33:23 crc kubenswrapper[4875]: I1126 00:33:23.427202 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-25nqc"] Nov 26 00:33:23 crc kubenswrapper[4875]: I1126 00:33:23.427489 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-25nqc" podUID="dc6d1620-c8a0-4c69-81db-8cf106f500d1" containerName="registry-server" containerID="cri-o://b30183f9e6336e215df32a7e85c1f82728f6c3130e365da925969d3f38ebbce0" gracePeriod=2 Nov 26 00:33:23 crc kubenswrapper[4875]: I1126 00:33:23.661040 4875 generic.go:334] "Generic (PLEG): container finished" podID="dc6d1620-c8a0-4c69-81db-8cf106f500d1" containerID="b30183f9e6336e215df32a7e85c1f82728f6c3130e365da925969d3f38ebbce0" exitCode=0 Nov 26 00:33:23 crc kubenswrapper[4875]: I1126 00:33:23.661108 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-25nqc" event={"ID":"dc6d1620-c8a0-4c69-81db-8cf106f500d1","Type":"ContainerDied","Data":"b30183f9e6336e215df32a7e85c1f82728f6c3130e365da925969d3f38ebbce0"} Nov 26 00:33:23 crc kubenswrapper[4875]: I1126 00:33:23.851432 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-25nqc" Nov 26 00:33:23 crc kubenswrapper[4875]: I1126 00:33:23.938370 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-f76mt\" (UniqueName: \"kubernetes.io/projected/dc6d1620-c8a0-4c69-81db-8cf106f500d1-kube-api-access-f76mt\") pod \"dc6d1620-c8a0-4c69-81db-8cf106f500d1\" (UID: \"dc6d1620-c8a0-4c69-81db-8cf106f500d1\") " Nov 26 00:33:23 crc kubenswrapper[4875]: I1126 00:33:23.938480 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc6d1620-c8a0-4c69-81db-8cf106f500d1-utilities\") pod \"dc6d1620-c8a0-4c69-81db-8cf106f500d1\" (UID: \"dc6d1620-c8a0-4c69-81db-8cf106f500d1\") " Nov 26 00:33:23 crc kubenswrapper[4875]: I1126 00:33:23.938510 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc6d1620-c8a0-4c69-81db-8cf106f500d1-catalog-content\") pod \"dc6d1620-c8a0-4c69-81db-8cf106f500d1\" (UID: \"dc6d1620-c8a0-4c69-81db-8cf106f500d1\") " Nov 26 00:33:23 crc kubenswrapper[4875]: I1126 00:33:23.939927 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dc6d1620-c8a0-4c69-81db-8cf106f500d1-utilities" (OuterVolumeSpecName: "utilities") pod "dc6d1620-c8a0-4c69-81db-8cf106f500d1" (UID: "dc6d1620-c8a0-4c69-81db-8cf106f500d1"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:33:23 crc kubenswrapper[4875]: I1126 00:33:23.943821 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/dc6d1620-c8a0-4c69-81db-8cf106f500d1-kube-api-access-f76mt" (OuterVolumeSpecName: "kube-api-access-f76mt") pod "dc6d1620-c8a0-4c69-81db-8cf106f500d1" (UID: "dc6d1620-c8a0-4c69-81db-8cf106f500d1"). InnerVolumeSpecName "kube-api-access-f76mt". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:33:23 crc kubenswrapper[4875]: I1126 00:33:23.990658 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/dc6d1620-c8a0-4c69-81db-8cf106f500d1-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "dc6d1620-c8a0-4c69-81db-8cf106f500d1" (UID: "dc6d1620-c8a0-4c69-81db-8cf106f500d1"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:33:24 crc kubenswrapper[4875]: I1126 00:33:24.039844 4875 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/dc6d1620-c8a0-4c69-81db-8cf106f500d1-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 00:33:24 crc kubenswrapper[4875]: I1126 00:33:24.039875 4875 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/dc6d1620-c8a0-4c69-81db-8cf106f500d1-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 00:33:24 crc kubenswrapper[4875]: I1126 00:33:24.039886 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-f76mt\" (UniqueName: \"kubernetes.io/projected/dc6d1620-c8a0-4c69-81db-8cf106f500d1-kube-api-access-f76mt\") on node \"crc\" DevicePath \"\"" Nov 26 00:33:24 crc kubenswrapper[4875]: I1126 00:33:24.226540 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-647tp"] Nov 26 00:33:24 crc kubenswrapper[4875]: I1126 00:33:24.226796 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-647tp" podUID="ff970fac-2516-42c4-bdab-6be6f2985fb9" containerName="registry-server" containerID="cri-o://fe5769bc2f1ce6e341f28d96443de51cacd30c41a8a6a8b94891d10a169845a3" gracePeriod=2 Nov 26 00:33:24 crc kubenswrapper[4875]: I1126 00:33:24.608153 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-647tp" Nov 26 00:33:24 crc kubenswrapper[4875]: I1126 00:33:24.628325 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-249dr"] Nov 26 00:33:24 crc kubenswrapper[4875]: I1126 00:33:24.628630 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-249dr" podUID="b2e91527-9faa-4e82-af09-21d90c108433" containerName="registry-server" containerID="cri-o://0e81f7939fe409c993ded4b43b21f24ed8320e32492594dcd193e571f2c3d6e9" gracePeriod=2 Nov 26 00:33:24 crc kubenswrapper[4875]: I1126 00:33:24.670842 4875 generic.go:334] "Generic (PLEG): container finished" podID="ff970fac-2516-42c4-bdab-6be6f2985fb9" containerID="fe5769bc2f1ce6e341f28d96443de51cacd30c41a8a6a8b94891d10a169845a3" exitCode=0 Nov 26 00:33:24 crc kubenswrapper[4875]: I1126 00:33:24.670922 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-647tp" Nov 26 00:33:24 crc kubenswrapper[4875]: I1126 00:33:24.670926 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-647tp" event={"ID":"ff970fac-2516-42c4-bdab-6be6f2985fb9","Type":"ContainerDied","Data":"fe5769bc2f1ce6e341f28d96443de51cacd30c41a8a6a8b94891d10a169845a3"} Nov 26 00:33:24 crc kubenswrapper[4875]: I1126 00:33:24.671104 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-647tp" event={"ID":"ff970fac-2516-42c4-bdab-6be6f2985fb9","Type":"ContainerDied","Data":"ad60d88b14eef34e6cd0db78ab2c52643af1f94c66dd41cb0f361a7e829b2299"} Nov 26 00:33:24 crc kubenswrapper[4875]: I1126 00:33:24.671154 4875 scope.go:117] "RemoveContainer" containerID="fe5769bc2f1ce6e341f28d96443de51cacd30c41a8a6a8b94891d10a169845a3" Nov 26 00:33:24 crc kubenswrapper[4875]: I1126 00:33:24.676210 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-25nqc" event={"ID":"dc6d1620-c8a0-4c69-81db-8cf106f500d1","Type":"ContainerDied","Data":"932a4b34b7e2b7377c25bea02c344d88587aa03e13d62120cd7a906aa6b393ba"} Nov 26 00:33:24 crc kubenswrapper[4875]: I1126 00:33:24.676251 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-25nqc" Nov 26 00:33:24 crc kubenswrapper[4875]: I1126 00:33:24.739587 4875 scope.go:117] "RemoveContainer" containerID="6ac51d253ab237630b4c2d2924544dec85883e2c53f1b7d26c1ef13e6c5558b1" Nov 26 00:33:24 crc kubenswrapper[4875]: I1126 00:33:24.750142 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff970fac-2516-42c4-bdab-6be6f2985fb9-catalog-content\") pod \"ff970fac-2516-42c4-bdab-6be6f2985fb9\" (UID: \"ff970fac-2516-42c4-bdab-6be6f2985fb9\") " Nov 26 00:33:24 crc kubenswrapper[4875]: I1126 00:33:24.750214 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-6tvwk\" (UniqueName: \"kubernetes.io/projected/ff970fac-2516-42c4-bdab-6be6f2985fb9-kube-api-access-6tvwk\") pod \"ff970fac-2516-42c4-bdab-6be6f2985fb9\" (UID: \"ff970fac-2516-42c4-bdab-6be6f2985fb9\") " Nov 26 00:33:24 crc kubenswrapper[4875]: I1126 00:33:24.750337 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff970fac-2516-42c4-bdab-6be6f2985fb9-utilities\") pod \"ff970fac-2516-42c4-bdab-6be6f2985fb9\" (UID: \"ff970fac-2516-42c4-bdab-6be6f2985fb9\") " Nov 26 00:33:24 crc kubenswrapper[4875]: I1126 00:33:24.751748 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ff970fac-2516-42c4-bdab-6be6f2985fb9-utilities" (OuterVolumeSpecName: "utilities") pod "ff970fac-2516-42c4-bdab-6be6f2985fb9" (UID: "ff970fac-2516-42c4-bdab-6be6f2985fb9"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:33:24 crc kubenswrapper[4875]: I1126 00:33:24.775135 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/ff970fac-2516-42c4-bdab-6be6f2985fb9-kube-api-access-6tvwk" (OuterVolumeSpecName: "kube-api-access-6tvwk") pod "ff970fac-2516-42c4-bdab-6be6f2985fb9" (UID: "ff970fac-2516-42c4-bdab-6be6f2985fb9"). InnerVolumeSpecName "kube-api-access-6tvwk". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:33:24 crc kubenswrapper[4875]: I1126 00:33:24.779327 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-25nqc"] Nov 26 00:33:24 crc kubenswrapper[4875]: I1126 00:33:24.807714 4875 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-25nqc"] Nov 26 00:33:24 crc kubenswrapper[4875]: I1126 00:33:24.815425 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/ff970fac-2516-42c4-bdab-6be6f2985fb9-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "ff970fac-2516-42c4-bdab-6be6f2985fb9" (UID: "ff970fac-2516-42c4-bdab-6be6f2985fb9"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:33:24 crc kubenswrapper[4875]: I1126 00:33:24.825774 4875 scope.go:117] "RemoveContainer" containerID="82ed9cd20db03cee5d5c2e34855398a3339118555f1fdf656329b9aafb84989b" Nov 26 00:33:24 crc kubenswrapper[4875]: I1126 00:33:24.835580 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-w76tl"] Nov 26 00:33:24 crc kubenswrapper[4875]: I1126 00:33:24.835820 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-w76tl" podUID="3b1ea123-2e62-46c9-829a-f62e2cb207a8" containerName="registry-server" containerID="cri-o://be20818a6aef76f9b56c2c35a877e405e9fb80763ea77098732039955bcdec1a" gracePeriod=2 Nov 26 00:33:24 crc kubenswrapper[4875]: I1126 00:33:24.855271 4875 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/ff970fac-2516-42c4-bdab-6be6f2985fb9-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 00:33:24 crc kubenswrapper[4875]: I1126 00:33:24.855321 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-6tvwk\" (UniqueName: \"kubernetes.io/projected/ff970fac-2516-42c4-bdab-6be6f2985fb9-kube-api-access-6tvwk\") on node \"crc\" DevicePath \"\"" Nov 26 00:33:24 crc kubenswrapper[4875]: I1126 00:33:24.855334 4875 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/ff970fac-2516-42c4-bdab-6be6f2985fb9-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 00:33:24 crc kubenswrapper[4875]: I1126 00:33:24.863621 4875 scope.go:117] "RemoveContainer" containerID="fe5769bc2f1ce6e341f28d96443de51cacd30c41a8a6a8b94891d10a169845a3" Nov 26 00:33:24 crc kubenswrapper[4875]: E1126 00:33:24.868898 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"fe5769bc2f1ce6e341f28d96443de51cacd30c41a8a6a8b94891d10a169845a3\": container with ID starting with fe5769bc2f1ce6e341f28d96443de51cacd30c41a8a6a8b94891d10a169845a3 not found: ID does not exist" containerID="fe5769bc2f1ce6e341f28d96443de51cacd30c41a8a6a8b94891d10a169845a3" Nov 26 00:33:24 crc kubenswrapper[4875]: I1126 00:33:24.868962 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"fe5769bc2f1ce6e341f28d96443de51cacd30c41a8a6a8b94891d10a169845a3"} err="failed to get container status \"fe5769bc2f1ce6e341f28d96443de51cacd30c41a8a6a8b94891d10a169845a3\": rpc error: code = NotFound desc = could not find container \"fe5769bc2f1ce6e341f28d96443de51cacd30c41a8a6a8b94891d10a169845a3\": container with ID starting with 
fe5769bc2f1ce6e341f28d96443de51cacd30c41a8a6a8b94891d10a169845a3 not found: ID does not exist" Nov 26 00:33:24 crc kubenswrapper[4875]: I1126 00:33:24.868999 4875 scope.go:117] "RemoveContainer" containerID="6ac51d253ab237630b4c2d2924544dec85883e2c53f1b7d26c1ef13e6c5558b1" Nov 26 00:33:24 crc kubenswrapper[4875]: E1126 00:33:24.869261 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"6ac51d253ab237630b4c2d2924544dec85883e2c53f1b7d26c1ef13e6c5558b1\": container with ID starting with 6ac51d253ab237630b4c2d2924544dec85883e2c53f1b7d26c1ef13e6c5558b1 not found: ID does not exist" containerID="6ac51d253ab237630b4c2d2924544dec85883e2c53f1b7d26c1ef13e6c5558b1" Nov 26 00:33:24 crc kubenswrapper[4875]: I1126 00:33:24.869291 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"6ac51d253ab237630b4c2d2924544dec85883e2c53f1b7d26c1ef13e6c5558b1"} err="failed to get container status \"6ac51d253ab237630b4c2d2924544dec85883e2c53f1b7d26c1ef13e6c5558b1\": rpc error: code = NotFound desc = could not find container \"6ac51d253ab237630b4c2d2924544dec85883e2c53f1b7d26c1ef13e6c5558b1\": container with ID starting with 6ac51d253ab237630b4c2d2924544dec85883e2c53f1b7d26c1ef13e6c5558b1 not found: ID does not exist" Nov 26 00:33:24 crc kubenswrapper[4875]: I1126 00:33:24.869311 4875 scope.go:117] "RemoveContainer" containerID="82ed9cd20db03cee5d5c2e34855398a3339118555f1fdf656329b9aafb84989b" Nov 26 00:33:24 crc kubenswrapper[4875]: E1126 00:33:24.869618 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"82ed9cd20db03cee5d5c2e34855398a3339118555f1fdf656329b9aafb84989b\": container with ID starting with 82ed9cd20db03cee5d5c2e34855398a3339118555f1fdf656329b9aafb84989b not found: ID does not exist" containerID="82ed9cd20db03cee5d5c2e34855398a3339118555f1fdf656329b9aafb84989b" Nov 26 00:33:24 crc kubenswrapper[4875]: I1126 00:33:24.869651 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"82ed9cd20db03cee5d5c2e34855398a3339118555f1fdf656329b9aafb84989b"} err="failed to get container status \"82ed9cd20db03cee5d5c2e34855398a3339118555f1fdf656329b9aafb84989b\": rpc error: code = NotFound desc = could not find container \"82ed9cd20db03cee5d5c2e34855398a3339118555f1fdf656329b9aafb84989b\": container with ID starting with 82ed9cd20db03cee5d5c2e34855398a3339118555f1fdf656329b9aafb84989b not found: ID does not exist" Nov 26 00:33:24 crc kubenswrapper[4875]: I1126 00:33:24.869671 4875 scope.go:117] "RemoveContainer" containerID="b30183f9e6336e215df32a7e85c1f82728f6c3130e365da925969d3f38ebbce0" Nov 26 00:33:24 crc kubenswrapper[4875]: I1126 00:33:24.900755 4875 scope.go:117] "RemoveContainer" containerID="0aae320e023b52ed51f67f3701af36ebce6882d879d59052a686aae8a73ea69a" Nov 26 00:33:24 crc kubenswrapper[4875]: I1126 00:33:24.927483 4875 scope.go:117] "RemoveContainer" containerID="9c652313d5bfd3150bfdc4581e0691376e968cd60de5d6365cb9e358a6e4ac3f" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.015519 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-647tp"] Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.020466 4875 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-647tp"] Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.036106 4875 kubelet.go:2437] "SyncLoop DELETE" 
source="api" pods=["openshift-marketplace/community-operators-ntrhj"] Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.037558 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-ntrhj" podUID="fa43ec61-012b-42c0-bcc7-26a36a0db093" containerName="registry-server" containerID="cri-o://877a2eb758667e339e9ae4b10bee379e58b62d99caaf237d8d3547112f22b4b4" gracePeriod=2 Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.083119 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-249dr" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.159782 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b2e91527-9faa-4e82-af09-21d90c108433-catalog-content\") pod \"b2e91527-9faa-4e82-af09-21d90c108433\" (UID: \"b2e91527-9faa-4e82-af09-21d90c108433\") " Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.159846 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-c2ccp\" (UniqueName: \"kubernetes.io/projected/b2e91527-9faa-4e82-af09-21d90c108433-kube-api-access-c2ccp\") pod \"b2e91527-9faa-4e82-af09-21d90c108433\" (UID: \"b2e91527-9faa-4e82-af09-21d90c108433\") " Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.159917 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b2e91527-9faa-4e82-af09-21d90c108433-utilities\") pod \"b2e91527-9faa-4e82-af09-21d90c108433\" (UID: \"b2e91527-9faa-4e82-af09-21d90c108433\") " Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.161175 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b2e91527-9faa-4e82-af09-21d90c108433-utilities" (OuterVolumeSpecName: "utilities") pod "b2e91527-9faa-4e82-af09-21d90c108433" (UID: "b2e91527-9faa-4e82-af09-21d90c108433"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.164239 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/b2e91527-9faa-4e82-af09-21d90c108433-kube-api-access-c2ccp" (OuterVolumeSpecName: "kube-api-access-c2ccp") pod "b2e91527-9faa-4e82-af09-21d90c108433" (UID: "b2e91527-9faa-4e82-af09-21d90c108433"). InnerVolumeSpecName "kube-api-access-c2ccp". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.189135 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-w76tl" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.215850 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/b2e91527-9faa-4e82-af09-21d90c108433-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "b2e91527-9faa-4e82-af09-21d90c108433" (UID: "b2e91527-9faa-4e82-af09-21d90c108433"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.229842 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2bmxq"] Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.230083 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-2bmxq" podUID="cf29d3d9-d456-4db2-b187-d3a5f1fa74d8" containerName="registry-server" containerID="cri-o://5ec7afa4984661cc5b627c156f103b5086e7f2e5f3198384e9eaf0e9b2761730" gracePeriod=2 Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.263050 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3b1ea123-2e62-46c9-829a-f62e2cb207a8-utilities\") pod \"3b1ea123-2e62-46c9-829a-f62e2cb207a8\" (UID: \"3b1ea123-2e62-46c9-829a-f62e2cb207a8\") " Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.263103 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-xnrqm\" (UniqueName: \"kubernetes.io/projected/3b1ea123-2e62-46c9-829a-f62e2cb207a8-kube-api-access-xnrqm\") pod \"3b1ea123-2e62-46c9-829a-f62e2cb207a8\" (UID: \"3b1ea123-2e62-46c9-829a-f62e2cb207a8\") " Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.263124 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3b1ea123-2e62-46c9-829a-f62e2cb207a8-catalog-content\") pod \"3b1ea123-2e62-46c9-829a-f62e2cb207a8\" (UID: \"3b1ea123-2e62-46c9-829a-f62e2cb207a8\") " Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.263530 4875 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/b2e91527-9faa-4e82-af09-21d90c108433-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.263555 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-c2ccp\" (UniqueName: \"kubernetes.io/projected/b2e91527-9faa-4e82-af09-21d90c108433-kube-api-access-c2ccp\") on node \"crc\" DevicePath \"\"" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.263572 4875 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/b2e91527-9faa-4e82-af09-21d90c108433-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.264487 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3b1ea123-2e62-46c9-829a-f62e2cb207a8-utilities" (OuterVolumeSpecName: "utilities") pod "3b1ea123-2e62-46c9-829a-f62e2cb207a8" (UID: "3b1ea123-2e62-46c9-829a-f62e2cb207a8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.271579 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/3b1ea123-2e62-46c9-829a-f62e2cb207a8-kube-api-access-xnrqm" (OuterVolumeSpecName: "kube-api-access-xnrqm") pod "3b1ea123-2e62-46c9-829a-f62e2cb207a8" (UID: "3b1ea123-2e62-46c9-829a-f62e2cb207a8"). InnerVolumeSpecName "kube-api-access-xnrqm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.347654 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/3b1ea123-2e62-46c9-829a-f62e2cb207a8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "3b1ea123-2e62-46c9-829a-f62e2cb207a8" (UID: "3b1ea123-2e62-46c9-829a-f62e2cb207a8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.365739 4875 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/3b1ea123-2e62-46c9-829a-f62e2cb207a8-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.365787 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-xnrqm\" (UniqueName: \"kubernetes.io/projected/3b1ea123-2e62-46c9-829a-f62e2cb207a8-kube-api-access-xnrqm\") on node \"crc\" DevicePath \"\"" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.365801 4875 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/3b1ea123-2e62-46c9-829a-f62e2cb207a8-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.368234 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-ntrhj" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.430792 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-tt82f"] Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.431034 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-tt82f" podUID="bef9a3b8-a74c-4e4a-8b50-d216af962a40" containerName="registry-server" containerID="cri-o://c6b10e137db06cbc7a47b1a5c21b48353c6fcd14de7690aa8797a516c600e3cf" gracePeriod=2 Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.466503 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa43ec61-012b-42c0-bcc7-26a36a0db093-utilities\") pod \"fa43ec61-012b-42c0-bcc7-26a36a0db093\" (UID: \"fa43ec61-012b-42c0-bcc7-26a36a0db093\") " Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.466596 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa43ec61-012b-42c0-bcc7-26a36a0db093-catalog-content\") pod \"fa43ec61-012b-42c0-bcc7-26a36a0db093\" (UID: \"fa43ec61-012b-42c0-bcc7-26a36a0db093\") " Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.466623 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-lbgbm\" (UniqueName: \"kubernetes.io/projected/fa43ec61-012b-42c0-bcc7-26a36a0db093-kube-api-access-lbgbm\") pod \"fa43ec61-012b-42c0-bcc7-26a36a0db093\" (UID: \"fa43ec61-012b-42c0-bcc7-26a36a0db093\") " Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.476566 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/fa43ec61-012b-42c0-bcc7-26a36a0db093-kube-api-access-lbgbm" (OuterVolumeSpecName: "kube-api-access-lbgbm") pod "fa43ec61-012b-42c0-bcc7-26a36a0db093" (UID: "fa43ec61-012b-42c0-bcc7-26a36a0db093"). InnerVolumeSpecName "kube-api-access-lbgbm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.477980 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fa43ec61-012b-42c0-bcc7-26a36a0db093-utilities" (OuterVolumeSpecName: "utilities") pod "fa43ec61-012b-42c0-bcc7-26a36a0db093" (UID: "fa43ec61-012b-42c0-bcc7-26a36a0db093"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.525755 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/fa43ec61-012b-42c0-bcc7-26a36a0db093-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "fa43ec61-012b-42c0-bcc7-26a36a0db093" (UID: "fa43ec61-012b-42c0-bcc7-26a36a0db093"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.537457 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="dc6d1620-c8a0-4c69-81db-8cf106f500d1" path="/var/lib/kubelet/pods/dc6d1620-c8a0-4c69-81db-8cf106f500d1/volumes" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.538060 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="ff970fac-2516-42c4-bdab-6be6f2985fb9" path="/var/lib/kubelet/pods/ff970fac-2516-42c4-bdab-6be6f2985fb9/volumes" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.568298 4875 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/fa43ec61-012b-42c0-bcc7-26a36a0db093-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.568341 4875 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/fa43ec61-012b-42c0-bcc7-26a36a0db093-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.568355 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-lbgbm\" (UniqueName: \"kubernetes.io/projected/fa43ec61-012b-42c0-bcc7-26a36a0db093-kube-api-access-lbgbm\") on node \"crc\" DevicePath \"\"" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.630868 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-qhkn2"] Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.633805 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/redhat-operators-qhkn2" podUID="07908ebc-ce0d-47c9-83c5-b459c3c23620" containerName="registry-server" containerID="cri-o://d2d7c5892f33809c35ef125c9d3ba109a0576cf64935ce70b983abc84cae7b7a" gracePeriod=2 Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.639716 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-2bmxq" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.686870 4875 generic.go:334] "Generic (PLEG): container finished" podID="cf29d3d9-d456-4db2-b187-d3a5f1fa74d8" containerID="5ec7afa4984661cc5b627c156f103b5086e7f2e5f3198384e9eaf0e9b2761730" exitCode=0 Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.686950 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2bmxq" event={"ID":"cf29d3d9-d456-4db2-b187-d3a5f1fa74d8","Type":"ContainerDied","Data":"5ec7afa4984661cc5b627c156f103b5086e7f2e5f3198384e9eaf0e9b2761730"} Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.686985 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-2bmxq" event={"ID":"cf29d3d9-d456-4db2-b187-d3a5f1fa74d8","Type":"ContainerDied","Data":"1fed9c3314a2f27139e229f1740238e57b21633e26b0c838ad36a6c281ea408e"} Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.687004 4875 scope.go:117] "RemoveContainer" containerID="5ec7afa4984661cc5b627c156f103b5086e7f2e5f3198384e9eaf0e9b2761730" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.686999 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-2bmxq" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.691753 4875 generic.go:334] "Generic (PLEG): container finished" podID="bef9a3b8-a74c-4e4a-8b50-d216af962a40" containerID="c6b10e137db06cbc7a47b1a5c21b48353c6fcd14de7690aa8797a516c600e3cf" exitCode=0 Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.691790 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tt82f" event={"ID":"bef9a3b8-a74c-4e4a-8b50-d216af962a40","Type":"ContainerDied","Data":"c6b10e137db06cbc7a47b1a5c21b48353c6fcd14de7690aa8797a516c600e3cf"} Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.697936 4875 generic.go:334] "Generic (PLEG): container finished" podID="fa43ec61-012b-42c0-bcc7-26a36a0db093" containerID="877a2eb758667e339e9ae4b10bee379e58b62d99caaf237d8d3547112f22b4b4" exitCode=0 Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.698014 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ntrhj" event={"ID":"fa43ec61-012b-42c0-bcc7-26a36a0db093","Type":"ContainerDied","Data":"877a2eb758667e339e9ae4b10bee379e58b62d99caaf237d8d3547112f22b4b4"} Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.698044 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-ntrhj" event={"ID":"fa43ec61-012b-42c0-bcc7-26a36a0db093","Type":"ContainerDied","Data":"d77ebeedf37f32d6168d1a405f252022f3470c7b25fc685fda234188a05c0767"} Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.698116 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-ntrhj" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.705699 4875 generic.go:334] "Generic (PLEG): container finished" podID="b2e91527-9faa-4e82-af09-21d90c108433" containerID="0e81f7939fe409c993ded4b43b21f24ed8320e32492594dcd193e571f2c3d6e9" exitCode=0 Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.705845 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-249dr" event={"ID":"b2e91527-9faa-4e82-af09-21d90c108433","Type":"ContainerDied","Data":"0e81f7939fe409c993ded4b43b21f24ed8320e32492594dcd193e571f2c3d6e9"} Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.705872 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-249dr" event={"ID":"b2e91527-9faa-4e82-af09-21d90c108433","Type":"ContainerDied","Data":"9a69a18399ff09c0d6830e6e27ed73796d8fe52f3316057a87c619408b6d35ad"} Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.705984 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-249dr" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.714241 4875 generic.go:334] "Generic (PLEG): container finished" podID="3b1ea123-2e62-46c9-829a-f62e2cb207a8" containerID="be20818a6aef76f9b56c2c35a877e405e9fb80763ea77098732039955bcdec1a" exitCode=0 Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.714275 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w76tl" event={"ID":"3b1ea123-2e62-46c9-829a-f62e2cb207a8","Type":"ContainerDied","Data":"be20818a6aef76f9b56c2c35a877e405e9fb80763ea77098732039955bcdec1a"} Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.714296 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-w76tl" event={"ID":"3b1ea123-2e62-46c9-829a-f62e2cb207a8","Type":"ContainerDied","Data":"5c65844e98c0f05694d13644138d39b85b6c6814bb66396ad4411005d0a42e9e"} Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.714350 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-w76tl" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.749960 4875 scope.go:117] "RemoveContainer" containerID="e17496c59083910e59dd6cac3dddfd03893e5e8031103ec15224389ce1235024" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.760022 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-ntrhj"] Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.770192 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cf29d3d9-d456-4db2-b187-d3a5f1fa74d8-catalog-content\") pod \"cf29d3d9-d456-4db2-b187-d3a5f1fa74d8\" (UID: \"cf29d3d9-d456-4db2-b187-d3a5f1fa74d8\") " Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.770325 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cf29d3d9-d456-4db2-b187-d3a5f1fa74d8-utilities\") pod \"cf29d3d9-d456-4db2-b187-d3a5f1fa74d8\" (UID: \"cf29d3d9-d456-4db2-b187-d3a5f1fa74d8\") " Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.770362 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bqbzm\" (UniqueName: \"kubernetes.io/projected/cf29d3d9-d456-4db2-b187-d3a5f1fa74d8-kube-api-access-bqbzm\") pod \"cf29d3d9-d456-4db2-b187-d3a5f1fa74d8\" (UID: \"cf29d3d9-d456-4db2-b187-d3a5f1fa74d8\") " Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.771032 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cf29d3d9-d456-4db2-b187-d3a5f1fa74d8-utilities" (OuterVolumeSpecName: "utilities") pod "cf29d3d9-d456-4db2-b187-d3a5f1fa74d8" (UID: "cf29d3d9-d456-4db2-b187-d3a5f1fa74d8"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.775002 4875 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/cf29d3d9-d456-4db2-b187-d3a5f1fa74d8-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.777546 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/cf29d3d9-d456-4db2-b187-d3a5f1fa74d8-kube-api-access-bqbzm" (OuterVolumeSpecName: "kube-api-access-bqbzm") pod "cf29d3d9-d456-4db2-b187-d3a5f1fa74d8" (UID: "cf29d3d9-d456-4db2-b187-d3a5f1fa74d8"). InnerVolumeSpecName "kube-api-access-bqbzm". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.783594 4875 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-ntrhj"] Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.786933 4875 scope.go:117] "RemoveContainer" containerID="bfb90bfebcf662aa8640b0bbb46c703704bac4c196332c1a7d756ea9ebbb9cdf" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.792298 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-w76tl"] Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.797497 4875 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-w76tl"] Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.802222 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-249dr"] Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.808248 4875 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-249dr"] Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.828677 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bc62x"] Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.828980 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-bc62x" podUID="756b3d84-948c-4ca2-ab16-8097c2274da4" containerName="registry-server" containerID="cri-o://396f8833e25627ae79bb0654a558b54ede83fd08aadc8450e9f401f80e35d74d" gracePeriod=2 Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.834942 4875 scope.go:117] "RemoveContainer" containerID="5ec7afa4984661cc5b627c156f103b5086e7f2e5f3198384e9eaf0e9b2761730" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.835041 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-tt82f" Nov 26 00:33:25 crc kubenswrapper[4875]: E1126 00:33:25.836063 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"5ec7afa4984661cc5b627c156f103b5086e7f2e5f3198384e9eaf0e9b2761730\": container with ID starting with 5ec7afa4984661cc5b627c156f103b5086e7f2e5f3198384e9eaf0e9b2761730 not found: ID does not exist" containerID="5ec7afa4984661cc5b627c156f103b5086e7f2e5f3198384e9eaf0e9b2761730" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.836156 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"5ec7afa4984661cc5b627c156f103b5086e7f2e5f3198384e9eaf0e9b2761730"} err="failed to get container status \"5ec7afa4984661cc5b627c156f103b5086e7f2e5f3198384e9eaf0e9b2761730\": rpc error: code = NotFound desc = could not find container \"5ec7afa4984661cc5b627c156f103b5086e7f2e5f3198384e9eaf0e9b2761730\": container with ID starting with 5ec7afa4984661cc5b627c156f103b5086e7f2e5f3198384e9eaf0e9b2761730 not found: ID does not exist" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.836183 4875 scope.go:117] "RemoveContainer" containerID="e17496c59083910e59dd6cac3dddfd03893e5e8031103ec15224389ce1235024" Nov 26 00:33:25 crc kubenswrapper[4875]: E1126 00:33:25.836964 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"e17496c59083910e59dd6cac3dddfd03893e5e8031103ec15224389ce1235024\": container with ID starting with e17496c59083910e59dd6cac3dddfd03893e5e8031103ec15224389ce1235024 not found: ID does not exist" containerID="e17496c59083910e59dd6cac3dddfd03893e5e8031103ec15224389ce1235024" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.836997 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"e17496c59083910e59dd6cac3dddfd03893e5e8031103ec15224389ce1235024"} err="failed to get container status \"e17496c59083910e59dd6cac3dddfd03893e5e8031103ec15224389ce1235024\": rpc error: code = NotFound desc = could not find container \"e17496c59083910e59dd6cac3dddfd03893e5e8031103ec15224389ce1235024\": container with ID starting with e17496c59083910e59dd6cac3dddfd03893e5e8031103ec15224389ce1235024 not found: ID does not exist" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.837198 4875 scope.go:117] "RemoveContainer" containerID="bfb90bfebcf662aa8640b0bbb46c703704bac4c196332c1a7d756ea9ebbb9cdf" Nov 26 00:33:25 crc kubenswrapper[4875]: E1126 00:33:25.837499 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"bfb90bfebcf662aa8640b0bbb46c703704bac4c196332c1a7d756ea9ebbb9cdf\": container with ID starting with bfb90bfebcf662aa8640b0bbb46c703704bac4c196332c1a7d756ea9ebbb9cdf not found: ID does not exist" containerID="bfb90bfebcf662aa8640b0bbb46c703704bac4c196332c1a7d756ea9ebbb9cdf" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.837530 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"bfb90bfebcf662aa8640b0bbb46c703704bac4c196332c1a7d756ea9ebbb9cdf"} err="failed to get container status \"bfb90bfebcf662aa8640b0bbb46c703704bac4c196332c1a7d756ea9ebbb9cdf\": rpc error: code = NotFound desc = could not find container \"bfb90bfebcf662aa8640b0bbb46c703704bac4c196332c1a7d756ea9ebbb9cdf\": container with ID starting with 
bfb90bfebcf662aa8640b0bbb46c703704bac4c196332c1a7d756ea9ebbb9cdf not found: ID does not exist" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.837549 4875 scope.go:117] "RemoveContainer" containerID="877a2eb758667e339e9ae4b10bee379e58b62d99caaf237d8d3547112f22b4b4" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.866397 4875 scope.go:117] "RemoveContainer" containerID="061091b05b8e2f94d71e9487436ce5e3bd67fc7dcd7bfb116474a9e754373d8b" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.869807 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/cf29d3d9-d456-4db2-b187-d3a5f1fa74d8-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "cf29d3d9-d456-4db2-b187-d3a5f1fa74d8" (UID: "cf29d3d9-d456-4db2-b187-d3a5f1fa74d8"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.876671 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bqbzm\" (UniqueName: \"kubernetes.io/projected/cf29d3d9-d456-4db2-b187-d3a5f1fa74d8-kube-api-access-bqbzm\") on node \"crc\" DevicePath \"\"" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.876698 4875 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/cf29d3d9-d456-4db2-b187-d3a5f1fa74d8-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.881695 4875 scope.go:117] "RemoveContainer" containerID="16f27c2fd4e9a6fcd0f88579a1eca42b7bfa805a1e80b38c05fd0b76e02ecff9" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.953314 4875 scope.go:117] "RemoveContainer" containerID="877a2eb758667e339e9ae4b10bee379e58b62d99caaf237d8d3547112f22b4b4" Nov 26 00:33:25 crc kubenswrapper[4875]: E1126 00:33:25.953770 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"877a2eb758667e339e9ae4b10bee379e58b62d99caaf237d8d3547112f22b4b4\": container with ID starting with 877a2eb758667e339e9ae4b10bee379e58b62d99caaf237d8d3547112f22b4b4 not found: ID does not exist" containerID="877a2eb758667e339e9ae4b10bee379e58b62d99caaf237d8d3547112f22b4b4" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.953794 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"877a2eb758667e339e9ae4b10bee379e58b62d99caaf237d8d3547112f22b4b4"} err="failed to get container status \"877a2eb758667e339e9ae4b10bee379e58b62d99caaf237d8d3547112f22b4b4\": rpc error: code = NotFound desc = could not find container \"877a2eb758667e339e9ae4b10bee379e58b62d99caaf237d8d3547112f22b4b4\": container with ID starting with 877a2eb758667e339e9ae4b10bee379e58b62d99caaf237d8d3547112f22b4b4 not found: ID does not exist" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.953814 4875 scope.go:117] "RemoveContainer" containerID="061091b05b8e2f94d71e9487436ce5e3bd67fc7dcd7bfb116474a9e754373d8b" Nov 26 00:33:25 crc kubenswrapper[4875]: E1126 00:33:25.954061 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"061091b05b8e2f94d71e9487436ce5e3bd67fc7dcd7bfb116474a9e754373d8b\": container with ID starting with 061091b05b8e2f94d71e9487436ce5e3bd67fc7dcd7bfb116474a9e754373d8b not found: ID does not exist" containerID="061091b05b8e2f94d71e9487436ce5e3bd67fc7dcd7bfb116474a9e754373d8b" Nov 26 00:33:25 crc kubenswrapper[4875]: 
I1126 00:33:25.954081 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"061091b05b8e2f94d71e9487436ce5e3bd67fc7dcd7bfb116474a9e754373d8b"} err="failed to get container status \"061091b05b8e2f94d71e9487436ce5e3bd67fc7dcd7bfb116474a9e754373d8b\": rpc error: code = NotFound desc = could not find container \"061091b05b8e2f94d71e9487436ce5e3bd67fc7dcd7bfb116474a9e754373d8b\": container with ID starting with 061091b05b8e2f94d71e9487436ce5e3bd67fc7dcd7bfb116474a9e754373d8b not found: ID does not exist" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.954093 4875 scope.go:117] "RemoveContainer" containerID="16f27c2fd4e9a6fcd0f88579a1eca42b7bfa805a1e80b38c05fd0b76e02ecff9" Nov 26 00:33:25 crc kubenswrapper[4875]: E1126 00:33:25.954449 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"16f27c2fd4e9a6fcd0f88579a1eca42b7bfa805a1e80b38c05fd0b76e02ecff9\": container with ID starting with 16f27c2fd4e9a6fcd0f88579a1eca42b7bfa805a1e80b38c05fd0b76e02ecff9 not found: ID does not exist" containerID="16f27c2fd4e9a6fcd0f88579a1eca42b7bfa805a1e80b38c05fd0b76e02ecff9" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.954468 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"16f27c2fd4e9a6fcd0f88579a1eca42b7bfa805a1e80b38c05fd0b76e02ecff9"} err="failed to get container status \"16f27c2fd4e9a6fcd0f88579a1eca42b7bfa805a1e80b38c05fd0b76e02ecff9\": rpc error: code = NotFound desc = could not find container \"16f27c2fd4e9a6fcd0f88579a1eca42b7bfa805a1e80b38c05fd0b76e02ecff9\": container with ID starting with 16f27c2fd4e9a6fcd0f88579a1eca42b7bfa805a1e80b38c05fd0b76e02ecff9 not found: ID does not exist" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.954481 4875 scope.go:117] "RemoveContainer" containerID="0e81f7939fe409c993ded4b43b21f24ed8320e32492594dcd193e571f2c3d6e9" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.968978 4875 scope.go:117] "RemoveContainer" containerID="95b705b1f2f1ab24d79d00d95d816c3b8eadc3e45c077c6c9278babe652e5b38" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.977732 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bef9a3b8-a74c-4e4a-8b50-d216af962a40-catalog-content\") pod \"bef9a3b8-a74c-4e4a-8b50-d216af962a40\" (UID: \"bef9a3b8-a74c-4e4a-8b50-d216af962a40\") " Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.977772 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-vj2ps\" (UniqueName: \"kubernetes.io/projected/bef9a3b8-a74c-4e4a-8b50-d216af962a40-kube-api-access-vj2ps\") pod \"bef9a3b8-a74c-4e4a-8b50-d216af962a40\" (UID: \"bef9a3b8-a74c-4e4a-8b50-d216af962a40\") " Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.977878 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bef9a3b8-a74c-4e4a-8b50-d216af962a40-utilities\") pod \"bef9a3b8-a74c-4e4a-8b50-d216af962a40\" (UID: \"bef9a3b8-a74c-4e4a-8b50-d216af962a40\") " Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.978747 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bef9a3b8-a74c-4e4a-8b50-d216af962a40-utilities" (OuterVolumeSpecName: "utilities") pod "bef9a3b8-a74c-4e4a-8b50-d216af962a40" (UID: "bef9a3b8-a74c-4e4a-8b50-d216af962a40"). 
InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.981097 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/bef9a3b8-a74c-4e4a-8b50-d216af962a40-kube-api-access-vj2ps" (OuterVolumeSpecName: "kube-api-access-vj2ps") pod "bef9a3b8-a74c-4e4a-8b50-d216af962a40" (UID: "bef9a3b8-a74c-4e4a-8b50-d216af962a40"). InnerVolumeSpecName "kube-api-access-vj2ps". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:33:25 crc kubenswrapper[4875]: I1126 00:33:25.995499 4875 scope.go:117] "RemoveContainer" containerID="1117a8976de610b780f60f3de93cc6ab5f3accfcbf05e899ca736225df1f0c23" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.013588 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/redhat-operators-qhkn2" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.018965 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-2bmxq"] Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.022481 4875 scope.go:117] "RemoveContainer" containerID="0e81f7939fe409c993ded4b43b21f24ed8320e32492594dcd193e571f2c3d6e9" Nov 26 00:33:26 crc kubenswrapper[4875]: E1126 00:33:26.022957 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"0e81f7939fe409c993ded4b43b21f24ed8320e32492594dcd193e571f2c3d6e9\": container with ID starting with 0e81f7939fe409c993ded4b43b21f24ed8320e32492594dcd193e571f2c3d6e9 not found: ID does not exist" containerID="0e81f7939fe409c993ded4b43b21f24ed8320e32492594dcd193e571f2c3d6e9" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.022994 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"0e81f7939fe409c993ded4b43b21f24ed8320e32492594dcd193e571f2c3d6e9"} err="failed to get container status \"0e81f7939fe409c993ded4b43b21f24ed8320e32492594dcd193e571f2c3d6e9\": rpc error: code = NotFound desc = could not find container \"0e81f7939fe409c993ded4b43b21f24ed8320e32492594dcd193e571f2c3d6e9\": container with ID starting with 0e81f7939fe409c993ded4b43b21f24ed8320e32492594dcd193e571f2c3d6e9 not found: ID does not exist" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.023019 4875 scope.go:117] "RemoveContainer" containerID="95b705b1f2f1ab24d79d00d95d816c3b8eadc3e45c077c6c9278babe652e5b38" Nov 26 00:33:26 crc kubenswrapper[4875]: E1126 00:33:26.023870 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"95b705b1f2f1ab24d79d00d95d816c3b8eadc3e45c077c6c9278babe652e5b38\": container with ID starting with 95b705b1f2f1ab24d79d00d95d816c3b8eadc3e45c077c6c9278babe652e5b38 not found: ID does not exist" containerID="95b705b1f2f1ab24d79d00d95d816c3b8eadc3e45c077c6c9278babe652e5b38" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.023902 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"95b705b1f2f1ab24d79d00d95d816c3b8eadc3e45c077c6c9278babe652e5b38"} err="failed to get container status \"95b705b1f2f1ab24d79d00d95d816c3b8eadc3e45c077c6c9278babe652e5b38\": rpc error: code = NotFound desc = could not find container \"95b705b1f2f1ab24d79d00d95d816c3b8eadc3e45c077c6c9278babe652e5b38\": container with ID starting with 95b705b1f2f1ab24d79d00d95d816c3b8eadc3e45c077c6c9278babe652e5b38 not found: ID 
does not exist" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.023920 4875 scope.go:117] "RemoveContainer" containerID="1117a8976de610b780f60f3de93cc6ab5f3accfcbf05e899ca736225df1f0c23" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.029617 4875 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-2bmxq"] Nov 26 00:33:26 crc kubenswrapper[4875]: E1126 00:33:26.030700 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1117a8976de610b780f60f3de93cc6ab5f3accfcbf05e899ca736225df1f0c23\": container with ID starting with 1117a8976de610b780f60f3de93cc6ab5f3accfcbf05e899ca736225df1f0c23 not found: ID does not exist" containerID="1117a8976de610b780f60f3de93cc6ab5f3accfcbf05e899ca736225df1f0c23" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.030743 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1117a8976de610b780f60f3de93cc6ab5f3accfcbf05e899ca736225df1f0c23"} err="failed to get container status \"1117a8976de610b780f60f3de93cc6ab5f3accfcbf05e899ca736225df1f0c23\": rpc error: code = NotFound desc = could not find container \"1117a8976de610b780f60f3de93cc6ab5f3accfcbf05e899ca736225df1f0c23\": container with ID starting with 1117a8976de610b780f60f3de93cc6ab5f3accfcbf05e899ca736225df1f0c23 not found: ID does not exist" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.030775 4875 scope.go:117] "RemoveContainer" containerID="be20818a6aef76f9b56c2c35a877e405e9fb80763ea77098732039955bcdec1a" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.035221 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/bef9a3b8-a74c-4e4a-8b50-d216af962a40-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "bef9a3b8-a74c-4e4a-8b50-d216af962a40" (UID: "bef9a3b8-a74c-4e4a-8b50-d216af962a40"). InnerVolumeSpecName "catalog-content". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.050576 4875 scope.go:117] "RemoveContainer" containerID="df7279783322857708b95637067bed50e687ff2f590b470881ac23eb27a0c37a" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.068859 4875 scope.go:117] "RemoveContainer" containerID="05a18c7faeb164544afccfa6226c0f65cedcdc005df6e33eedb2b44e7a5e14ab" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.083711 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-bnt62\" (UniqueName: \"kubernetes.io/projected/07908ebc-ce0d-47c9-83c5-b459c3c23620-kube-api-access-bnt62\") pod \"07908ebc-ce0d-47c9-83c5-b459c3c23620\" (UID: \"07908ebc-ce0d-47c9-83c5-b459c3c23620\") " Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.084071 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07908ebc-ce0d-47c9-83c5-b459c3c23620-catalog-content\") pod \"07908ebc-ce0d-47c9-83c5-b459c3c23620\" (UID: \"07908ebc-ce0d-47c9-83c5-b459c3c23620\") " Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.085593 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07908ebc-ce0d-47c9-83c5-b459c3c23620-utilities\") pod \"07908ebc-ce0d-47c9-83c5-b459c3c23620\" (UID: \"07908ebc-ce0d-47c9-83c5-b459c3c23620\") " Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.085913 4875 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/bef9a3b8-a74c-4e4a-8b50-d216af962a40-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.085929 4875 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/bef9a3b8-a74c-4e4a-8b50-d216af962a40-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.085941 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-vj2ps\" (UniqueName: \"kubernetes.io/projected/bef9a3b8-a74c-4e4a-8b50-d216af962a40-kube-api-access-vj2ps\") on node \"crc\" DevicePath \"\"" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.087732 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/07908ebc-ce0d-47c9-83c5-b459c3c23620-utilities" (OuterVolumeSpecName: "utilities") pod "07908ebc-ce0d-47c9-83c5-b459c3c23620" (UID: "07908ebc-ce0d-47c9-83c5-b459c3c23620"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.088164 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/07908ebc-ce0d-47c9-83c5-b459c3c23620-kube-api-access-bnt62" (OuterVolumeSpecName: "kube-api-access-bnt62") pod "07908ebc-ce0d-47c9-83c5-b459c3c23620" (UID: "07908ebc-ce0d-47c9-83c5-b459c3c23620"). InnerVolumeSpecName "kube-api-access-bnt62". 
PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.090550 4875 scope.go:117] "RemoveContainer" containerID="be20818a6aef76f9b56c2c35a877e405e9fb80763ea77098732039955bcdec1a" Nov 26 00:33:26 crc kubenswrapper[4875]: E1126 00:33:26.094208 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"be20818a6aef76f9b56c2c35a877e405e9fb80763ea77098732039955bcdec1a\": container with ID starting with be20818a6aef76f9b56c2c35a877e405e9fb80763ea77098732039955bcdec1a not found: ID does not exist" containerID="be20818a6aef76f9b56c2c35a877e405e9fb80763ea77098732039955bcdec1a" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.094249 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"be20818a6aef76f9b56c2c35a877e405e9fb80763ea77098732039955bcdec1a"} err="failed to get container status \"be20818a6aef76f9b56c2c35a877e405e9fb80763ea77098732039955bcdec1a\": rpc error: code = NotFound desc = could not find container \"be20818a6aef76f9b56c2c35a877e405e9fb80763ea77098732039955bcdec1a\": container with ID starting with be20818a6aef76f9b56c2c35a877e405e9fb80763ea77098732039955bcdec1a not found: ID does not exist" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.094273 4875 scope.go:117] "RemoveContainer" containerID="df7279783322857708b95637067bed50e687ff2f590b470881ac23eb27a0c37a" Nov 26 00:33:26 crc kubenswrapper[4875]: E1126 00:33:26.094632 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"df7279783322857708b95637067bed50e687ff2f590b470881ac23eb27a0c37a\": container with ID starting with df7279783322857708b95637067bed50e687ff2f590b470881ac23eb27a0c37a not found: ID does not exist" containerID="df7279783322857708b95637067bed50e687ff2f590b470881ac23eb27a0c37a" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.094654 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"df7279783322857708b95637067bed50e687ff2f590b470881ac23eb27a0c37a"} err="failed to get container status \"df7279783322857708b95637067bed50e687ff2f590b470881ac23eb27a0c37a\": rpc error: code = NotFound desc = could not find container \"df7279783322857708b95637067bed50e687ff2f590b470881ac23eb27a0c37a\": container with ID starting with df7279783322857708b95637067bed50e687ff2f590b470881ac23eb27a0c37a not found: ID does not exist" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.094673 4875 scope.go:117] "RemoveContainer" containerID="05a18c7faeb164544afccfa6226c0f65cedcdc005df6e33eedb2b44e7a5e14ab" Nov 26 00:33:26 crc kubenswrapper[4875]: E1126 00:33:26.094975 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"05a18c7faeb164544afccfa6226c0f65cedcdc005df6e33eedb2b44e7a5e14ab\": container with ID starting with 05a18c7faeb164544afccfa6226c0f65cedcdc005df6e33eedb2b44e7a5e14ab not found: ID does not exist" containerID="05a18c7faeb164544afccfa6226c0f65cedcdc005df6e33eedb2b44e7a5e14ab" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.094997 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"05a18c7faeb164544afccfa6226c0f65cedcdc005df6e33eedb2b44e7a5e14ab"} err="failed to get container status \"05a18c7faeb164544afccfa6226c0f65cedcdc005df6e33eedb2b44e7a5e14ab\": rpc error: code = NotFound desc = could not 
find container \"05a18c7faeb164544afccfa6226c0f65cedcdc005df6e33eedb2b44e7a5e14ab\": container with ID starting with 05a18c7faeb164544afccfa6226c0f65cedcdc005df6e33eedb2b44e7a5e14ab not found: ID does not exist" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.156373 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bc62x" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.187625 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-bnt62\" (UniqueName: \"kubernetes.io/projected/07908ebc-ce0d-47c9-83c5-b459c3c23620-kube-api-access-bnt62\") on node \"crc\" DevicePath \"\"" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.187658 4875 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/07908ebc-ce0d-47c9-83c5-b459c3c23620-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.190376 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/07908ebc-ce0d-47c9-83c5-b459c3c23620-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "07908ebc-ce0d-47c9-83c5-b459c3c23620" (UID: "07908ebc-ce0d-47c9-83c5-b459c3c23620"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.232008 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rfmwb"] Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.232286 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-marketplace/community-operators-rfmwb" podUID="136bbb86-4bff-47a2-947c-399c8bed11ca" containerName="registry-server" containerID="cri-o://ba1e704ab0e4a6e144cbe56b578a289204af686036e4286a83164051872453b2" gracePeriod=2 Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.288425 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-794hh\" (UniqueName: \"kubernetes.io/projected/756b3d84-948c-4ca2-ab16-8097c2274da4-kube-api-access-794hh\") pod \"756b3d84-948c-4ca2-ab16-8097c2274da4\" (UID: \"756b3d84-948c-4ca2-ab16-8097c2274da4\") " Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.288516 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/756b3d84-948c-4ca2-ab16-8097c2274da4-catalog-content\") pod \"756b3d84-948c-4ca2-ab16-8097c2274da4\" (UID: \"756b3d84-948c-4ca2-ab16-8097c2274da4\") " Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.288562 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/756b3d84-948c-4ca2-ab16-8097c2274da4-utilities\") pod \"756b3d84-948c-4ca2-ab16-8097c2274da4\" (UID: \"756b3d84-948c-4ca2-ab16-8097c2274da4\") " Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.288830 4875 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/07908ebc-ce0d-47c9-83c5-b459c3c23620-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.289475 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/756b3d84-948c-4ca2-ab16-8097c2274da4-utilities" (OuterVolumeSpecName: "utilities") pod 
"756b3d84-948c-4ca2-ab16-8097c2274da4" (UID: "756b3d84-948c-4ca2-ab16-8097c2274da4"). InnerVolumeSpecName "utilities". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.292134 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/756b3d84-948c-4ca2-ab16-8097c2274da4-kube-api-access-794hh" (OuterVolumeSpecName: "kube-api-access-794hh") pod "756b3d84-948c-4ca2-ab16-8097c2274da4" (UID: "756b3d84-948c-4ca2-ab16-8097c2274da4"). InnerVolumeSpecName "kube-api-access-794hh". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.337026 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/756b3d84-948c-4ca2-ab16-8097c2274da4-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "756b3d84-948c-4ca2-ab16-8097c2274da4" (UID: "756b3d84-948c-4ca2-ab16-8097c2274da4"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.389728 4875 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/756b3d84-948c-4ca2-ab16-8097c2274da4-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.389758 4875 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/756b3d84-948c-4ca2-ab16-8097c2274da4-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.389768 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-794hh\" (UniqueName: \"kubernetes.io/projected/756b3d84-948c-4ca2-ab16-8097c2274da4-kube-api-access-794hh\") on node \"crc\" DevicePath \"\"" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.544951 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-rfmwb" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.693135 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-b4p8w\" (UniqueName: \"kubernetes.io/projected/136bbb86-4bff-47a2-947c-399c8bed11ca-kube-api-access-b4p8w\") pod \"136bbb86-4bff-47a2-947c-399c8bed11ca\" (UID: \"136bbb86-4bff-47a2-947c-399c8bed11ca\") " Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.693198 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/136bbb86-4bff-47a2-947c-399c8bed11ca-utilities\") pod \"136bbb86-4bff-47a2-947c-399c8bed11ca\" (UID: \"136bbb86-4bff-47a2-947c-399c8bed11ca\") " Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.693351 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/136bbb86-4bff-47a2-947c-399c8bed11ca-catalog-content\") pod \"136bbb86-4bff-47a2-947c-399c8bed11ca\" (UID: \"136bbb86-4bff-47a2-947c-399c8bed11ca\") " Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.694032 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/136bbb86-4bff-47a2-947c-399c8bed11ca-utilities" (OuterVolumeSpecName: "utilities") pod "136bbb86-4bff-47a2-947c-399c8bed11ca" (UID: "136bbb86-4bff-47a2-947c-399c8bed11ca"). InnerVolumeSpecName "utilities". 
PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.706318 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/136bbb86-4bff-47a2-947c-399c8bed11ca-kube-api-access-b4p8w" (OuterVolumeSpecName: "kube-api-access-b4p8w") pod "136bbb86-4bff-47a2-947c-399c8bed11ca" (UID: "136bbb86-4bff-47a2-947c-399c8bed11ca"). InnerVolumeSpecName "kube-api-access-b4p8w". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.724328 4875 generic.go:334] "Generic (PLEG): container finished" podID="756b3d84-948c-4ca2-ab16-8097c2274da4" containerID="396f8833e25627ae79bb0654a558b54ede83fd08aadc8450e9f401f80e35d74d" exitCode=0 Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.724401 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-bc62x" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.724450 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bc62x" event={"ID":"756b3d84-948c-4ca2-ab16-8097c2274da4","Type":"ContainerDied","Data":"396f8833e25627ae79bb0654a558b54ede83fd08aadc8450e9f401f80e35d74d"} Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.724501 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-bc62x" event={"ID":"756b3d84-948c-4ca2-ab16-8097c2274da4","Type":"ContainerDied","Data":"133fba7a5f5563f78c64ea6b67c7a288e3639953b45c2b4f4aaf780886e93c58"} Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.724518 4875 scope.go:117] "RemoveContainer" containerID="396f8833e25627ae79bb0654a558b54ede83fd08aadc8450e9f401f80e35d74d" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.730849 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-tt82f" event={"ID":"bef9a3b8-a74c-4e4a-8b50-d216af962a40","Type":"ContainerDied","Data":"ebc15e7bef24745d57ddcc3a975f2da0b2496820be005f167dc9d92cbbac1ed9"} Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.730964 4875 util.go:48] "No ready sandbox for pod can be found. Need to start a new one" pod="openshift-marketplace/community-operators-tt82f" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.734271 4875 generic.go:334] "Generic (PLEG): container finished" podID="07908ebc-ce0d-47c9-83c5-b459c3c23620" containerID="d2d7c5892f33809c35ef125c9d3ba109a0576cf64935ce70b983abc84cae7b7a" exitCode=0 Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.734453 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qhkn2" event={"ID":"07908ebc-ce0d-47c9-83c5-b459c3c23620","Type":"ContainerDied","Data":"d2d7c5892f33809c35ef125c9d3ba109a0576cf64935ce70b983abc84cae7b7a"} Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.734520 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/redhat-operators-qhkn2" event={"ID":"07908ebc-ce0d-47c9-83c5-b459c3c23620","Type":"ContainerDied","Data":"450a11dda5cd27e3e94082e839ef3c71e5d7e0e482761f2a75c4a06d201c5c3f"} Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.734716 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/redhat-operators-qhkn2" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.757990 4875 scope.go:117] "RemoveContainer" containerID="2b3c37aaa085fd2049e5fa169483a513e863ef86a9d33edc7ff771839a56b1c5" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.758982 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/136bbb86-4bff-47a2-947c-399c8bed11ca-catalog-content" (OuterVolumeSpecName: "catalog-content") pod "136bbb86-4bff-47a2-947c-399c8bed11ca" (UID: "136bbb86-4bff-47a2-947c-399c8bed11ca"). InnerVolumeSpecName "catalog-content". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.795398 4875 reconciler_common.go:293] "Volume detached for volume \"catalog-content\" (UniqueName: \"kubernetes.io/empty-dir/136bbb86-4bff-47a2-947c-399c8bed11ca-catalog-content\") on node \"crc\" DevicePath \"\"" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.795458 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-b4p8w\" (UniqueName: \"kubernetes.io/projected/136bbb86-4bff-47a2-947c-399c8bed11ca-kube-api-access-b4p8w\") on node \"crc\" DevicePath \"\"" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.795474 4875 reconciler_common.go:293] "Volume detached for volume \"utilities\" (UniqueName: \"kubernetes.io/empty-dir/136bbb86-4bff-47a2-947c-399c8bed11ca-utilities\") on node \"crc\" DevicePath \"\"" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.797308 4875 scope.go:117] "RemoveContainer" containerID="816b9f8199e4a2f162c48d1cc226c50d07d89355ce2d827743550d6f18332011" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.809185 4875 generic.go:334] "Generic (PLEG): container finished" podID="136bbb86-4bff-47a2-947c-399c8bed11ca" containerID="ba1e704ab0e4a6e144cbe56b578a289204af686036e4286a83164051872453b2" exitCode=0 Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.809238 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rfmwb" event={"ID":"136bbb86-4bff-47a2-947c-399c8bed11ca","Type":"ContainerDied","Data":"ba1e704ab0e4a6e144cbe56b578a289204af686036e4286a83164051872453b2"} Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.809267 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-marketplace/community-operators-rfmwb" event={"ID":"136bbb86-4bff-47a2-947c-399c8bed11ca","Type":"ContainerDied","Data":"bf1a14709f804059e7e97eb5711e0f5c57d33d703b87b678a0f53a77917c5f62"} Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.809338 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-marketplace/community-operators-rfmwb" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.842554 4875 scope.go:117] "RemoveContainer" containerID="396f8833e25627ae79bb0654a558b54ede83fd08aadc8450e9f401f80e35d74d" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.843054 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/redhat-operators-qhkn2"] Nov 26 00:33:26 crc kubenswrapper[4875]: E1126 00:33:26.848734 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"396f8833e25627ae79bb0654a558b54ede83fd08aadc8450e9f401f80e35d74d\": container with ID starting with 396f8833e25627ae79bb0654a558b54ede83fd08aadc8450e9f401f80e35d74d not found: ID does not exist" containerID="396f8833e25627ae79bb0654a558b54ede83fd08aadc8450e9f401f80e35d74d" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.848803 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"396f8833e25627ae79bb0654a558b54ede83fd08aadc8450e9f401f80e35d74d"} err="failed to get container status \"396f8833e25627ae79bb0654a558b54ede83fd08aadc8450e9f401f80e35d74d\": rpc error: code = NotFound desc = could not find container \"396f8833e25627ae79bb0654a558b54ede83fd08aadc8450e9f401f80e35d74d\": container with ID starting with 396f8833e25627ae79bb0654a558b54ede83fd08aadc8450e9f401f80e35d74d not found: ID does not exist" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.848838 4875 scope.go:117] "RemoveContainer" containerID="2b3c37aaa085fd2049e5fa169483a513e863ef86a9d33edc7ff771839a56b1c5" Nov 26 00:33:26 crc kubenswrapper[4875]: E1126 00:33:26.853169 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"2b3c37aaa085fd2049e5fa169483a513e863ef86a9d33edc7ff771839a56b1c5\": container with ID starting with 2b3c37aaa085fd2049e5fa169483a513e863ef86a9d33edc7ff771839a56b1c5 not found: ID does not exist" containerID="2b3c37aaa085fd2049e5fa169483a513e863ef86a9d33edc7ff771839a56b1c5" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.853212 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"2b3c37aaa085fd2049e5fa169483a513e863ef86a9d33edc7ff771839a56b1c5"} err="failed to get container status \"2b3c37aaa085fd2049e5fa169483a513e863ef86a9d33edc7ff771839a56b1c5\": rpc error: code = NotFound desc = could not find container \"2b3c37aaa085fd2049e5fa169483a513e863ef86a9d33edc7ff771839a56b1c5\": container with ID starting with 2b3c37aaa085fd2049e5fa169483a513e863ef86a9d33edc7ff771839a56b1c5 not found: ID does not exist" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.853235 4875 scope.go:117] "RemoveContainer" containerID="816b9f8199e4a2f162c48d1cc226c50d07d89355ce2d827743550d6f18332011" Nov 26 00:33:26 crc kubenswrapper[4875]: E1126 00:33:26.854032 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"816b9f8199e4a2f162c48d1cc226c50d07d89355ce2d827743550d6f18332011\": container with ID starting with 816b9f8199e4a2f162c48d1cc226c50d07d89355ce2d827743550d6f18332011 not found: ID does not exist" containerID="816b9f8199e4a2f162c48d1cc226c50d07d89355ce2d827743550d6f18332011" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.854060 4875 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"816b9f8199e4a2f162c48d1cc226c50d07d89355ce2d827743550d6f18332011"} err="failed to get container status \"816b9f8199e4a2f162c48d1cc226c50d07d89355ce2d827743550d6f18332011\": rpc error: code = NotFound desc = could not find container \"816b9f8199e4a2f162c48d1cc226c50d07d89355ce2d827743550d6f18332011\": container with ID starting with 816b9f8199e4a2f162c48d1cc226c50d07d89355ce2d827743550d6f18332011 not found: ID does not exist" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.854078 4875 scope.go:117] "RemoveContainer" containerID="c6b10e137db06cbc7a47b1a5c21b48353c6fcd14de7690aa8797a516c600e3cf" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.858479 4875 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/redhat-operators-qhkn2"] Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.862581 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-bc62x"] Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.868986 4875 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-bc62x"] Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.875714 4875 scope.go:117] "RemoveContainer" containerID="bcbb01155d22740eb6f652b6ef3c1091dc174c1189ffc71052b4690fb36c0333" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.879363 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-tt82f"] Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.894674 4875 scope.go:117] "RemoveContainer" containerID="3b3b31d4edfb5d89bb423019bb5d7580341766e544ad51c3551fd775b486b7b0" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.906974 4875 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-tt82f"] Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.914988 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-marketplace/community-operators-rfmwb"] Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.919070 4875 scope.go:117] "RemoveContainer" containerID="d2d7c5892f33809c35ef125c9d3ba109a0576cf64935ce70b983abc84cae7b7a" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.921993 4875 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-marketplace/community-operators-rfmwb"] Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.931787 4875 scope.go:117] "RemoveContainer" containerID="b7839aed0eaaf6af87afd5946bb010bdc65a1e2ea8a50076830c7935e6543187" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.949043 4875 scope.go:117] "RemoveContainer" containerID="45d36ebfb57d4be58053da89fdfdf398d7dd13813f9ef80a73d0dcede673b5f5" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.964270 4875 scope.go:117] "RemoveContainer" containerID="d2d7c5892f33809c35ef125c9d3ba109a0576cf64935ce70b983abc84cae7b7a" Nov 26 00:33:26 crc kubenswrapper[4875]: E1126 00:33:26.964631 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"d2d7c5892f33809c35ef125c9d3ba109a0576cf64935ce70b983abc84cae7b7a\": container with ID starting with d2d7c5892f33809c35ef125c9d3ba109a0576cf64935ce70b983abc84cae7b7a not found: ID does not exist" containerID="d2d7c5892f33809c35ef125c9d3ba109a0576cf64935ce70b983abc84cae7b7a" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.964678 4875 pod_container_deletor.go:53] "DeleteContainer returned error" 
containerID={"Type":"cri-o","ID":"d2d7c5892f33809c35ef125c9d3ba109a0576cf64935ce70b983abc84cae7b7a"} err="failed to get container status \"d2d7c5892f33809c35ef125c9d3ba109a0576cf64935ce70b983abc84cae7b7a\": rpc error: code = NotFound desc = could not find container \"d2d7c5892f33809c35ef125c9d3ba109a0576cf64935ce70b983abc84cae7b7a\": container with ID starting with d2d7c5892f33809c35ef125c9d3ba109a0576cf64935ce70b983abc84cae7b7a not found: ID does not exist" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.964702 4875 scope.go:117] "RemoveContainer" containerID="b7839aed0eaaf6af87afd5946bb010bdc65a1e2ea8a50076830c7935e6543187" Nov 26 00:33:26 crc kubenswrapper[4875]: E1126 00:33:26.965116 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"b7839aed0eaaf6af87afd5946bb010bdc65a1e2ea8a50076830c7935e6543187\": container with ID starting with b7839aed0eaaf6af87afd5946bb010bdc65a1e2ea8a50076830c7935e6543187 not found: ID does not exist" containerID="b7839aed0eaaf6af87afd5946bb010bdc65a1e2ea8a50076830c7935e6543187" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.965146 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"b7839aed0eaaf6af87afd5946bb010bdc65a1e2ea8a50076830c7935e6543187"} err="failed to get container status \"b7839aed0eaaf6af87afd5946bb010bdc65a1e2ea8a50076830c7935e6543187\": rpc error: code = NotFound desc = could not find container \"b7839aed0eaaf6af87afd5946bb010bdc65a1e2ea8a50076830c7935e6543187\": container with ID starting with b7839aed0eaaf6af87afd5946bb010bdc65a1e2ea8a50076830c7935e6543187 not found: ID does not exist" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.965163 4875 scope.go:117] "RemoveContainer" containerID="45d36ebfb57d4be58053da89fdfdf398d7dd13813f9ef80a73d0dcede673b5f5" Nov 26 00:33:26 crc kubenswrapper[4875]: E1126 00:33:26.965464 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"45d36ebfb57d4be58053da89fdfdf398d7dd13813f9ef80a73d0dcede673b5f5\": container with ID starting with 45d36ebfb57d4be58053da89fdfdf398d7dd13813f9ef80a73d0dcede673b5f5 not found: ID does not exist" containerID="45d36ebfb57d4be58053da89fdfdf398d7dd13813f9ef80a73d0dcede673b5f5" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.965504 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"45d36ebfb57d4be58053da89fdfdf398d7dd13813f9ef80a73d0dcede673b5f5"} err="failed to get container status \"45d36ebfb57d4be58053da89fdfdf398d7dd13813f9ef80a73d0dcede673b5f5\": rpc error: code = NotFound desc = could not find container \"45d36ebfb57d4be58053da89fdfdf398d7dd13813f9ef80a73d0dcede673b5f5\": container with ID starting with 45d36ebfb57d4be58053da89fdfdf398d7dd13813f9ef80a73d0dcede673b5f5 not found: ID does not exist" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.965520 4875 scope.go:117] "RemoveContainer" containerID="ba1e704ab0e4a6e144cbe56b578a289204af686036e4286a83164051872453b2" Nov 26 00:33:26 crc kubenswrapper[4875]: I1126 00:33:26.979095 4875 scope.go:117] "RemoveContainer" containerID="47b5544b34aeabb49ab968d047a4557bd551a4e8267d5fc24cbcef8c1a686b98" Nov 26 00:33:27 crc kubenswrapper[4875]: I1126 00:33:26.995098 4875 scope.go:117] "RemoveContainer" containerID="1adfc7761690b44a2d4351a63e7bc2fa5a0cfd9c2df23f6deaee279c0052f15c" Nov 26 00:33:27 crc kubenswrapper[4875]: I1126 00:33:27.015140 4875 
scope.go:117] "RemoveContainer" containerID="ba1e704ab0e4a6e144cbe56b578a289204af686036e4286a83164051872453b2" Nov 26 00:33:27 crc kubenswrapper[4875]: E1126 00:33:27.015735 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"ba1e704ab0e4a6e144cbe56b578a289204af686036e4286a83164051872453b2\": container with ID starting with ba1e704ab0e4a6e144cbe56b578a289204af686036e4286a83164051872453b2 not found: ID does not exist" containerID="ba1e704ab0e4a6e144cbe56b578a289204af686036e4286a83164051872453b2" Nov 26 00:33:27 crc kubenswrapper[4875]: I1126 00:33:27.015770 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"ba1e704ab0e4a6e144cbe56b578a289204af686036e4286a83164051872453b2"} err="failed to get container status \"ba1e704ab0e4a6e144cbe56b578a289204af686036e4286a83164051872453b2\": rpc error: code = NotFound desc = could not find container \"ba1e704ab0e4a6e144cbe56b578a289204af686036e4286a83164051872453b2\": container with ID starting with ba1e704ab0e4a6e144cbe56b578a289204af686036e4286a83164051872453b2 not found: ID does not exist" Nov 26 00:33:27 crc kubenswrapper[4875]: I1126 00:33:27.015798 4875 scope.go:117] "RemoveContainer" containerID="47b5544b34aeabb49ab968d047a4557bd551a4e8267d5fc24cbcef8c1a686b98" Nov 26 00:33:27 crc kubenswrapper[4875]: E1126 00:33:27.016125 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"47b5544b34aeabb49ab968d047a4557bd551a4e8267d5fc24cbcef8c1a686b98\": container with ID starting with 47b5544b34aeabb49ab968d047a4557bd551a4e8267d5fc24cbcef8c1a686b98 not found: ID does not exist" containerID="47b5544b34aeabb49ab968d047a4557bd551a4e8267d5fc24cbcef8c1a686b98" Nov 26 00:33:27 crc kubenswrapper[4875]: I1126 00:33:27.016152 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"47b5544b34aeabb49ab968d047a4557bd551a4e8267d5fc24cbcef8c1a686b98"} err="failed to get container status \"47b5544b34aeabb49ab968d047a4557bd551a4e8267d5fc24cbcef8c1a686b98\": rpc error: code = NotFound desc = could not find container \"47b5544b34aeabb49ab968d047a4557bd551a4e8267d5fc24cbcef8c1a686b98\": container with ID starting with 47b5544b34aeabb49ab968d047a4557bd551a4e8267d5fc24cbcef8c1a686b98 not found: ID does not exist" Nov 26 00:33:27 crc kubenswrapper[4875]: I1126 00:33:27.016170 4875 scope.go:117] "RemoveContainer" containerID="1adfc7761690b44a2d4351a63e7bc2fa5a0cfd9c2df23f6deaee279c0052f15c" Nov 26 00:33:27 crc kubenswrapper[4875]: E1126 00:33:27.017560 4875 log.go:32] "ContainerStatus from runtime service failed" err="rpc error: code = NotFound desc = could not find container \"1adfc7761690b44a2d4351a63e7bc2fa5a0cfd9c2df23f6deaee279c0052f15c\": container with ID starting with 1adfc7761690b44a2d4351a63e7bc2fa5a0cfd9c2df23f6deaee279c0052f15c not found: ID does not exist" containerID="1adfc7761690b44a2d4351a63e7bc2fa5a0cfd9c2df23f6deaee279c0052f15c" Nov 26 00:33:27 crc kubenswrapper[4875]: I1126 00:33:27.017590 4875 pod_container_deletor.go:53] "DeleteContainer returned error" containerID={"Type":"cri-o","ID":"1adfc7761690b44a2d4351a63e7bc2fa5a0cfd9c2df23f6deaee279c0052f15c"} err="failed to get container status \"1adfc7761690b44a2d4351a63e7bc2fa5a0cfd9c2df23f6deaee279c0052f15c\": rpc error: code = NotFound desc = could not find container \"1adfc7761690b44a2d4351a63e7bc2fa5a0cfd9c2df23f6deaee279c0052f15c\": container with ID starting with 
1adfc7761690b44a2d4351a63e7bc2fa5a0cfd9c2df23f6deaee279c0052f15c not found: ID does not exist" Nov 26 00:33:27 crc kubenswrapper[4875]: I1126 00:33:27.537869 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="07908ebc-ce0d-47c9-83c5-b459c3c23620" path="/var/lib/kubelet/pods/07908ebc-ce0d-47c9-83c5-b459c3c23620/volumes" Nov 26 00:33:27 crc kubenswrapper[4875]: I1126 00:33:27.538513 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="136bbb86-4bff-47a2-947c-399c8bed11ca" path="/var/lib/kubelet/pods/136bbb86-4bff-47a2-947c-399c8bed11ca/volumes" Nov 26 00:33:27 crc kubenswrapper[4875]: I1126 00:33:27.539121 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="3b1ea123-2e62-46c9-829a-f62e2cb207a8" path="/var/lib/kubelet/pods/3b1ea123-2e62-46c9-829a-f62e2cb207a8/volumes" Nov 26 00:33:27 crc kubenswrapper[4875]: I1126 00:33:27.540361 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="756b3d84-948c-4ca2-ab16-8097c2274da4" path="/var/lib/kubelet/pods/756b3d84-948c-4ca2-ab16-8097c2274da4/volumes" Nov 26 00:33:27 crc kubenswrapper[4875]: I1126 00:33:27.541063 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="b2e91527-9faa-4e82-af09-21d90c108433" path="/var/lib/kubelet/pods/b2e91527-9faa-4e82-af09-21d90c108433/volumes" Nov 26 00:33:27 crc kubenswrapper[4875]: I1126 00:33:27.542255 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="bef9a3b8-a74c-4e4a-8b50-d216af962a40" path="/var/lib/kubelet/pods/bef9a3b8-a74c-4e4a-8b50-d216af962a40/volumes" Nov 26 00:33:27 crc kubenswrapper[4875]: I1126 00:33:27.542993 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="cf29d3d9-d456-4db2-b187-d3a5f1fa74d8" path="/var/lib/kubelet/pods/cf29d3d9-d456-4db2-b187-d3a5f1fa74d8/volumes" Nov 26 00:33:27 crc kubenswrapper[4875]: I1126 00:33:27.543682 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="fa43ec61-012b-42c0-bcc7-26a36a0db093" path="/var/lib/kubelet/pods/fa43ec61-012b-42c0-bcc7-26a36a0db093/volumes" Nov 26 00:33:36 crc kubenswrapper[4875]: I1126 00:33:36.528724 4875 scope.go:117] "RemoveContainer" containerID="5d035f080764919b1a1cc8b64c0f43d53b6b36ff26c916f5425dc21761337921" Nov 26 00:33:36 crc kubenswrapper[4875]: E1126 00:33:36.529501 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9mztb_openshift-machine-config-operator(0d4fb5a7-3ade-467a-9640-d744b1ef0c8e)\"" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" Nov 26 00:33:50 crc kubenswrapper[4875]: I1126 00:33:50.529514 4875 scope.go:117] "RemoveContainer" containerID="5d035f080764919b1a1cc8b64c0f43d53b6b36ff26c916f5425dc21761337921" Nov 26 00:33:50 crc kubenswrapper[4875]: E1126 00:33:50.530249 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9mztb_openshift-machine-config-operator(0d4fb5a7-3ade-467a-9640-d744b1ef0c8e)\"" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" Nov 26 00:34:00 crc kubenswrapper[4875]: I1126 00:34:00.023975 4875 generic.go:334] "Generic (PLEG): container 
finished" podID="de1b9642-5b3f-490f-a7bd-adf4247f7395" containerID="0d321cc2dd03632f28e804867ab96d44be80d70572c982411c611c2f4e34c276" exitCode=0 Nov 26 00:34:00 crc kubenswrapper[4875]: I1126 00:34:00.024028 4875 kubelet.go:2453] "SyncLoop (PLEG): event for pod" pod="openshift-must-gather-55lm5/must-gather-kdgps" event={"ID":"de1b9642-5b3f-490f-a7bd-adf4247f7395","Type":"ContainerDied","Data":"0d321cc2dd03632f28e804867ab96d44be80d70572c982411c611c2f4e34c276"} Nov 26 00:34:00 crc kubenswrapper[4875]: I1126 00:34:00.025040 4875 scope.go:117] "RemoveContainer" containerID="0d321cc2dd03632f28e804867ab96d44be80d70572c982411c611c2f4e34c276" Nov 26 00:34:00 crc kubenswrapper[4875]: I1126 00:34:00.380750 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-55lm5_must-gather-kdgps_de1b9642-5b3f-490f-a7bd-adf4247f7395/gather/0.log" Nov 26 00:34:05 crc kubenswrapper[4875]: I1126 00:34:05.529557 4875 scope.go:117] "RemoveContainer" containerID="5d035f080764919b1a1cc8b64c0f43d53b6b36ff26c916f5425dc21761337921" Nov 26 00:34:05 crc kubenswrapper[4875]: E1126 00:34:05.531235 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9mztb_openshift-machine-config-operator(0d4fb5a7-3ade-467a-9640-d744b1ef0c8e)\"" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" Nov 26 00:34:06 crc kubenswrapper[4875]: I1126 00:34:06.751865 4875 kubelet.go:2437] "SyncLoop DELETE" source="api" pods=["openshift-must-gather-55lm5/must-gather-kdgps"] Nov 26 00:34:06 crc kubenswrapper[4875]: I1126 00:34:06.754007 4875 kuberuntime_container.go:808] "Killing container with a grace period" pod="openshift-must-gather-55lm5/must-gather-kdgps" podUID="de1b9642-5b3f-490f-a7bd-adf4247f7395" containerName="copy" containerID="cri-o://7735f0ce65cc2d8af73b5384418079ea22254feb4de096bc188f3cd5623f239a" gracePeriod=2 Nov 26 00:34:06 crc kubenswrapper[4875]: I1126 00:34:06.757531 4875 kubelet.go:2431] "SyncLoop REMOVE" source="api" pods=["openshift-must-gather-55lm5/must-gather-kdgps"] Nov 26 00:34:07 crc kubenswrapper[4875]: I1126 00:34:07.080332 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-55lm5_must-gather-kdgps_de1b9642-5b3f-490f-a7bd-adf4247f7395/copy/0.log" Nov 26 00:34:07 crc kubenswrapper[4875]: I1126 00:34:07.081141 4875 generic.go:334] "Generic (PLEG): container finished" podID="de1b9642-5b3f-490f-a7bd-adf4247f7395" containerID="7735f0ce65cc2d8af73b5384418079ea22254feb4de096bc188f3cd5623f239a" exitCode=143 Nov 26 00:34:07 crc kubenswrapper[4875]: I1126 00:34:07.081191 4875 pod_container_deletor.go:80] "Container not found in pod's containers" containerID="4d62e6f3a454d5fff15e21fe10b195a23097fc1be2111dafb365a49415dfab4b" Nov 26 00:34:07 crc kubenswrapper[4875]: I1126 00:34:07.113717 4875 log.go:25] "Finished parsing log file" path="/var/log/pods/openshift-must-gather-55lm5_must-gather-kdgps_de1b9642-5b3f-490f-a7bd-adf4247f7395/copy/0.log" Nov 26 00:34:07 crc kubenswrapper[4875]: I1126 00:34:07.114094 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-55lm5/must-gather-kdgps" Nov 26 00:34:07 crc kubenswrapper[4875]: I1126 00:34:07.297298 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/de1b9642-5b3f-490f-a7bd-adf4247f7395-must-gather-output\") pod \"de1b9642-5b3f-490f-a7bd-adf4247f7395\" (UID: \"de1b9642-5b3f-490f-a7bd-adf4247f7395\") " Nov 26 00:34:07 crc kubenswrapper[4875]: I1126 00:34:07.297667 4875 reconciler_common.go:159] "operationExecutor.UnmountVolume started for volume \"kube-api-access-w67gx\" (UniqueName: \"kubernetes.io/projected/de1b9642-5b3f-490f-a7bd-adf4247f7395-kube-api-access-w67gx\") pod \"de1b9642-5b3f-490f-a7bd-adf4247f7395\" (UID: \"de1b9642-5b3f-490f-a7bd-adf4247f7395\") " Nov 26 00:34:07 crc kubenswrapper[4875]: I1126 00:34:07.303273 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/projected/de1b9642-5b3f-490f-a7bd-adf4247f7395-kube-api-access-w67gx" (OuterVolumeSpecName: "kube-api-access-w67gx") pod "de1b9642-5b3f-490f-a7bd-adf4247f7395" (UID: "de1b9642-5b3f-490f-a7bd-adf4247f7395"). InnerVolumeSpecName "kube-api-access-w67gx". PluginName "kubernetes.io/projected", VolumeGidValue "" Nov 26 00:34:07 crc kubenswrapper[4875]: I1126 00:34:07.332215 4875 operation_generator.go:803] UnmountVolume.TearDown succeeded for volume "kubernetes.io/empty-dir/de1b9642-5b3f-490f-a7bd-adf4247f7395-must-gather-output" (OuterVolumeSpecName: "must-gather-output") pod "de1b9642-5b3f-490f-a7bd-adf4247f7395" (UID: "de1b9642-5b3f-490f-a7bd-adf4247f7395"). InnerVolumeSpecName "must-gather-output". PluginName "kubernetes.io/empty-dir", VolumeGidValue "" Nov 26 00:34:07 crc kubenswrapper[4875]: I1126 00:34:07.398973 4875 reconciler_common.go:293] "Volume detached for volume \"must-gather-output\" (UniqueName: \"kubernetes.io/empty-dir/de1b9642-5b3f-490f-a7bd-adf4247f7395-must-gather-output\") on node \"crc\" DevicePath \"\"" Nov 26 00:34:07 crc kubenswrapper[4875]: I1126 00:34:07.399016 4875 reconciler_common.go:293] "Volume detached for volume \"kube-api-access-w67gx\" (UniqueName: \"kubernetes.io/projected/de1b9642-5b3f-490f-a7bd-adf4247f7395-kube-api-access-w67gx\") on node \"crc\" DevicePath \"\"" Nov 26 00:34:07 crc kubenswrapper[4875]: I1126 00:34:07.536846 4875 kubelet_volumes.go:163] "Cleaned up orphaned pod volumes dir" podUID="de1b9642-5b3f-490f-a7bd-adf4247f7395" path="/var/lib/kubelet/pods/de1b9642-5b3f-490f-a7bd-adf4247f7395/volumes" Nov 26 00:34:08 crc kubenswrapper[4875]: I1126 00:34:08.088329 4875 util.go:48] "No ready sandbox for pod can be found. 
Need to start a new one" pod="openshift-must-gather-55lm5/must-gather-kdgps" Nov 26 00:34:18 crc kubenswrapper[4875]: I1126 00:34:18.531192 4875 scope.go:117] "RemoveContainer" containerID="5d035f080764919b1a1cc8b64c0f43d53b6b36ff26c916f5425dc21761337921" Nov 26 00:34:18 crc kubenswrapper[4875]: E1126 00:34:18.532534 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9mztb_openshift-machine-config-operator(0d4fb5a7-3ade-467a-9640-d744b1ef0c8e)\"" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" Nov 26 00:34:32 crc kubenswrapper[4875]: I1126 00:34:32.529053 4875 scope.go:117] "RemoveContainer" containerID="5d035f080764919b1a1cc8b64c0f43d53b6b36ff26c916f5425dc21761337921" Nov 26 00:34:32 crc kubenswrapper[4875]: E1126 00:34:32.529896 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9mztb_openshift-machine-config-operator(0d4fb5a7-3ade-467a-9640-d744b1ef0c8e)\"" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" Nov 26 00:34:46 crc kubenswrapper[4875]: I1126 00:34:46.529260 4875 scope.go:117] "RemoveContainer" containerID="5d035f080764919b1a1cc8b64c0f43d53b6b36ff26c916f5425dc21761337921" Nov 26 00:34:46 crc kubenswrapper[4875]: E1126 00:34:46.530130 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9mztb_openshift-machine-config-operator(0d4fb5a7-3ade-467a-9640-d744b1ef0c8e)\"" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" Nov 26 00:34:58 crc kubenswrapper[4875]: I1126 00:34:58.528541 4875 scope.go:117] "RemoveContainer" containerID="5d035f080764919b1a1cc8b64c0f43d53b6b36ff26c916f5425dc21761337921" Nov 26 00:34:58 crc kubenswrapper[4875]: E1126 00:34:58.529402 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9mztb_openshift-machine-config-operator(0d4fb5a7-3ade-467a-9640-d744b1ef0c8e)\"" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" Nov 26 00:35:09 crc kubenswrapper[4875]: I1126 00:35:09.203772 4875 scope.go:117] "RemoveContainer" containerID="84c96ccb6b5a90259518349acd0c666c4f57bb52aaf4ab867479660aecaea788" Nov 26 00:35:09 crc kubenswrapper[4875]: I1126 00:35:09.225979 4875 scope.go:117] "RemoveContainer" containerID="fbd93bc01d32a9c4f6a2d60e409d60bb12bb11b093e8dd30d0eef22a59412ba0" Nov 26 00:35:11 crc kubenswrapper[4875]: I1126 00:35:11.529338 4875 scope.go:117] "RemoveContainer" containerID="5d035f080764919b1a1cc8b64c0f43d53b6b36ff26c916f5425dc21761337921" Nov 26 00:35:11 crc kubenswrapper[4875]: E1126 00:35:11.530166 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed 
container=machine-config-daemon pod=machine-config-daemon-9mztb_openshift-machine-config-operator(0d4fb5a7-3ade-467a-9640-d744b1ef0c8e)\"" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" Nov 26 00:35:25 crc kubenswrapper[4875]: I1126 00:35:25.529943 4875 scope.go:117] "RemoveContainer" containerID="5d035f080764919b1a1cc8b64c0f43d53b6b36ff26c916f5425dc21761337921" Nov 26 00:35:25 crc kubenswrapper[4875]: E1126 00:35:25.531112 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9mztb_openshift-machine-config-operator(0d4fb5a7-3ade-467a-9640-d744b1ef0c8e)\"" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" Nov 26 00:35:39 crc kubenswrapper[4875]: I1126 00:35:39.528861 4875 scope.go:117] "RemoveContainer" containerID="5d035f080764919b1a1cc8b64c0f43d53b6b36ff26c916f5425dc21761337921" Nov 26 00:35:39 crc kubenswrapper[4875]: E1126 00:35:39.529684 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9mztb_openshift-machine-config-operator(0d4fb5a7-3ade-467a-9640-d744b1ef0c8e)\"" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" Nov 26 00:35:50 crc kubenswrapper[4875]: I1126 00:35:50.529339 4875 scope.go:117] "RemoveContainer" containerID="5d035f080764919b1a1cc8b64c0f43d53b6b36ff26c916f5425dc21761337921" Nov 26 00:35:50 crc kubenswrapper[4875]: E1126 00:35:50.530455 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9mztb_openshift-machine-config-operator(0d4fb5a7-3ade-467a-9640-d744b1ef0c8e)\"" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" Nov 26 00:36:04 crc kubenswrapper[4875]: I1126 00:36:04.528982 4875 scope.go:117] "RemoveContainer" containerID="5d035f080764919b1a1cc8b64c0f43d53b6b36ff26c916f5425dc21761337921" Nov 26 00:36:04 crc kubenswrapper[4875]: E1126 00:36:04.529795 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9mztb_openshift-machine-config-operator(0d4fb5a7-3ade-467a-9640-d744b1ef0c8e)\"" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" Nov 26 00:36:17 crc kubenswrapper[4875]: I1126 00:36:17.532605 4875 scope.go:117] "RemoveContainer" containerID="5d035f080764919b1a1cc8b64c0f43d53b6b36ff26c916f5425dc21761337921" Nov 26 00:36:17 crc kubenswrapper[4875]: E1126 00:36:17.535270 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9mztb_openshift-machine-config-operator(0d4fb5a7-3ade-467a-9640-d744b1ef0c8e)\"" 
pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" Nov 26 00:36:30 crc kubenswrapper[4875]: I1126 00:36:30.529073 4875 scope.go:117] "RemoveContainer" containerID="5d035f080764919b1a1cc8b64c0f43d53b6b36ff26c916f5425dc21761337921" Nov 26 00:36:30 crc kubenswrapper[4875]: E1126 00:36:30.529961 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9mztb_openshift-machine-config-operator(0d4fb5a7-3ade-467a-9640-d744b1ef0c8e)\"" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" Nov 26 00:36:43 crc kubenswrapper[4875]: I1126 00:36:43.529213 4875 scope.go:117] "RemoveContainer" containerID="5d035f080764919b1a1cc8b64c0f43d53b6b36ff26c916f5425dc21761337921" Nov 26 00:36:43 crc kubenswrapper[4875]: E1126 00:36:43.529945 4875 pod_workers.go:1301] "Error syncing pod, skipping" err="failed to \"StartContainer\" for \"machine-config-daemon\" with CrashLoopBackOff: \"back-off 5m0s restarting failed container=machine-config-daemon pod=machine-config-daemon-9mztb_openshift-machine-config-operator(0d4fb5a7-3ade-467a-9640-d744b1ef0c8e)\"" pod="openshift-machine-config-operator/machine-config-daemon-9mztb" podUID="0d4fb5a7-3ade-467a-9640-d744b1ef0c8e" var/home/core/zuul-output/logs/crc-cloud-workdir-crc-all-logs.tar.gz0000644000175000000000000000005515111445653024452 0ustar coreroot‹íÁ  ÷Om7 €7šÞ'(var/home/core/zuul-output/logs/crc-cloud/0000755000175000000000000000000015111445653017367 5ustar corerootvar/home/core/zuul-output/artifacts/0000755000175000017500000000000015111441734016506 5ustar corecorevar/home/core/zuul-output/docs/0000755000175000017500000000000015111441734015456 5ustar corecore